1 
2 /*--------------------------------------------------------------------*/
3 /*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4 /*---                                         mc_malloc_wrappers.c ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of MemCheck, a heavyweight Valgrind tool for
9    detecting memory errors.
10 
11    Copyright (C) 2000-2013 Julian Seward
12       jseward@acm.org
13 
14    This program is free software; you can redistribute it and/or
15    modify it under the terms of the GNU General Public License as
16    published by the Free Software Foundation; either version 2 of the
17    License, or (at your option) any later version.
18 
19    This program is distributed in the hope that it will be useful, but
20    WITHOUT ANY WARRANTY; without even the implied warranty of
21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22    General Public License for more details.
23 
24    You should have received a copy of the GNU General Public License
25    along with this program; if not, write to the Free Software
26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27    02111-1307, USA.
28 
29    The GNU General Public License is contained in the file COPYING.
30 */
31 
32 #include "pub_tool_basics.h"
33 #include "pub_tool_execontext.h"
34 #include "pub_tool_poolalloc.h"
35 #include "pub_tool_hashtable.h"
36 #include "pub_tool_libcbase.h"
37 #include "pub_tool_libcassert.h"
38 #include "pub_tool_libcprint.h"
39 #include "pub_tool_mallocfree.h"
40 #include "pub_tool_options.h"
41 #include "pub_tool_replacemalloc.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_tooliface.h"     // Needed for mc_include.h
44 #include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
45 
46 #include "mc_include.h"
47 
48 /*------------------------------------------------------------*/
49 /*--- Defns                                                ---*/
50 /*------------------------------------------------------------*/
51 
52 /* Stats ... */
53 static SizeT cmalloc_n_mallocs  = 0;
54 static SizeT cmalloc_n_frees    = 0;
55 static ULong cmalloc_bs_mallocd = 0;
56 
57 /* For debug printing to do with mempools: what stack trace
58    depth to show. */
59 #define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
60 
61 
62 /*------------------------------------------------------------*/
63 /*--- Tracking malloc'd and free'd blocks                  ---*/
64 /*------------------------------------------------------------*/
65 
66 SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB
67 
68 /* Record malloc'd blocks. */
69 VgHashTable MC_(malloc_list) = NULL;
70 
71 /* Memory pools: a hash table of MC_Mempools.  Search key is
72    MC_Mempool::pool. */
73 VgHashTable MC_(mempool_list) = NULL;
74 
75 /* Pool allocator for MC_Chunk. */
76 PoolAlloc *MC_(chunk_poolalloc) = NULL;
77 static
78 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
79                             MC_AllocKind kind);
80 static inline
81 void delete_MC_Chunk (MC_Chunk* mc);
82 
83 /* Records blocks after freeing. */
84 /* Blocks freed by the client are queued in one of two lists of
85    freed blocks not yet physically freed:
86    the "big blocks" freed list, and
87    the "small blocks" freed list.
88    Blocks with a size >= MC_(clo_freelist_big_blocks)
89    are linked into the big blocks freed list.
90    This allows a client to allocate and free big blocks
91    (e.g. bigger than MC_(clo_freelist_vol)) without immediately
92    losing all protection against dangling pointers.
93    Position [0] is for big blocks, [1] is for small blocks. */
94 static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
95 static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
96 
97 /* Put a shadow chunk on the freed blocks queue, possibly freeing up
98    some of the oldest blocks in the queue at the same time. */
99 static void add_to_freed_queue ( MC_Chunk* mc )
100 {
101    const Bool show = False;
102    const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);
103 
104    /* Put it at the end of the freed list, unless the block
105       would be released directly anyway: in that case, we
106       put it at the head of the freed list. */
107    if (freed_list_end[l] == NULL) {
108       tl_assert(freed_list_start[l] == NULL);
109       mc->next = NULL;
110       freed_list_end[l]    = freed_list_start[l] = mc;
111    } else {
112       tl_assert(freed_list_end[l]->next == NULL);
113       if (mc->szB >= MC_(clo_freelist_vol)) {
114          mc->next = freed_list_start[l];
115          freed_list_start[l] = mc;
116       } else {
117          mc->next = NULL;
118          freed_list_end[l]->next = mc;
119          freed_list_end[l]       = mc;
120       }
121    }
122    VG_(free_queue_volume) += (Long)mc->szB;
123    if (show)
124       VG_(printf)("mc_freelist: acquire: volume now %lld\n",
125                   VG_(free_queue_volume));
126    VG_(free_queue_length)++;
127 }
128 
129 /* Release enough of the oldest blocks to bring the free queue
130    volume below MC_(clo_freelist_vol).
131    Start with the big blocks list first.
132    On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
133    On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
134 static void release_oldest_block(void)
135 {
136    const Bool show = False;
137    int i;
138    tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
139    tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);
140 
141    for (i = 0; i < 2; i++) {
142       while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
143              && freed_list_start[i] != NULL) {
144          MC_Chunk* mc1;
145 
146          tl_assert(freed_list_end[i] != NULL);
147 
148          mc1 = freed_list_start[i];
149          VG_(free_queue_volume) -= (Long)mc1->szB;
150          VG_(free_queue_length)--;
151          if (show)
152             VG_(printf)("mc_freelist: discard: volume now %lld\n",
153                         VG_(free_queue_volume));
154          tl_assert(VG_(free_queue_volume) >= 0);
155 
156          if (freed_list_start[i] == freed_list_end[i]) {
157             freed_list_start[i] = freed_list_end[i] = NULL;
158          } else {
159             freed_list_start[i] = mc1->next;
160          }
161          mc1->next = NULL; /* just paranoia */
162 
163          /* free MC_Chunk */
164          if (MC_AllocCustom != mc1->allockind)
165             VG_(cli_free) ( (void*)(mc1->data) );
166          delete_MC_Chunk ( mc1 );
167       }
168    }
169 }
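/* Illustrative note (not from the original file): the two thresholds used
   by the freed-blocks queue are Memcheck command-line options.
   MC_(clo_freelist_vol) is set with --freelist-vol=<bytes> and caps the
   total volume kept in the queue; MC_(clo_freelist_big_blocks) is set with
   --freelist-big-blocks=<bytes> and decides whether a freed block goes on
   the "big" list [0] or the "small" list [1].  The defaults depend on the
   Valgrind version. */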
170 
171 MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
172 {
173    int i;
174    for (i = 0; i < 2; i++) {
175       MC_Chunk*  mc;
176       mc = freed_list_start[i];
177       while (mc) {
178          if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
179                                     MC_(Malloc_Redzone_SzB) ))
180             return mc;
181          mc = mc->next;
182       }
183    }
184    return NULL;
185 }
186 
187 /* Allocate a shadow chunk, put it on the appropriate list.
188    If needed, release oldest blocks from freed list. */
189 static
190 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
191                             MC_AllocKind kind)
192 {
193    MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
194    mc->data      = p;
195    mc->szB       = szB;
196    mc->allockind = kind;
197    switch ( MC_(n_where_pointers)() ) {
198       case 2: mc->where[1] = 0; // fall through to 1
199       case 1: mc->where[0] = 0; // fall through to 0
200       case 0: break;
201       default: tl_assert(0);
202    }
203    MC_(set_allocated_at) (tid, mc);
204 
205    /* Each time a new MC_Chunk is created, release oldest blocks
206       if the free list volume is exceeded. */
207    if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
208       release_oldest_block();
209 
210    /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
211       the mc->data field isn't visible to the leak checker.  If memory
212       management is working correctly, any pointer returned by VG_(malloc)
213       should be noaccess as far as the client is concerned. */
214    if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
215       VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
216    }
217    return mc;
218 }
219 
220 static inline
221 void delete_MC_Chunk (MC_Chunk* mc)
222 {
223    VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
224 }
225 
226 // True if mc is in the given block list.
227 static Bool in_block_list (VgHashTable block_list, MC_Chunk* mc)
228 {
229    MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
230    if (found_mc) {
231       tl_assert (found_mc->data == mc->data);
232       /* If a user builds a pool from a malloc-ed superblock
233          and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
234          an address at the beginning of this superblock, then
235          this address will appear twice in the block_list.
236          We handle this case by checking size and allockind.
237          Note: I suspect that having the same block
238          twice in MC_(malloc_list) is a recipe for bugs.
239          It might be better to create a "standard" mempool to
240          handle all this more cleanly. */
241       if (found_mc->szB != mc->szB
242           || found_mc->allockind != mc->allockind)
243          return False;
244       tl_assert (found_mc == mc);
245       return True;
246    } else
247       return False;
248 }
249 
250 // True if mc is a live block (not yet freed).
251 static Bool live_block (MC_Chunk* mc)
252 {
253    if (mc->allockind == MC_AllocCustom) {
254       MC_Mempool* mp;
255       VG_(HT_ResetIter)(MC_(mempool_list));
256       while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
257          if ( in_block_list (mp->chunks, mc) )
258             return True;
259       }
260    }
261    /* Note: we fall back here when an MC_AllocCustom block is not
262       found, as such a block can also be inserted in MC_(malloc_list)
263       by VALGRIND_MALLOCLIKE_BLOCK. */
264    return in_block_list ( MC_(malloc_list), mc );
265 }
266 
267 ExeContext* MC_(allocated_at) (MC_Chunk* mc)
268 {
269    switch (MC_(clo_keep_stacktraces)) {
270       case KS_none:            return VG_(null_ExeContext) ();
271       case KS_alloc:           return mc->where[0];
272       case KS_free:            return VG_(null_ExeContext) ();
273       case KS_alloc_then_free: return (live_block(mc) ?
274                                        mc->where[0] : VG_(null_ExeContext) ());
275       case KS_alloc_and_free:  return mc->where[0];
276       default: tl_assert (0);
277    }
278 }
279 
280 ExeContext* MC_(freed_at) (MC_Chunk* mc)
281 {
282    switch (MC_(clo_keep_stacktraces)) {
283       case KS_none:            return VG_(null_ExeContext) ();
284       case KS_alloc:           return VG_(null_ExeContext) ();
285       case KS_free:            return (mc->where[0] ?
286                                        mc->where[0] : VG_(null_ExeContext) ());
287       case KS_alloc_then_free: return (live_block(mc) ?
288                                        VG_(null_ExeContext) () : mc->where[0]);
289       case KS_alloc_and_free:  return (mc->where[1] ?
290                                        mc->where[1] : VG_(null_ExeContext) ());
291       default: tl_assert (0);
292    }
293 }
294 
295 void  MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
296 {
297    switch (MC_(clo_keep_stacktraces)) {
298       case KS_none:            return;
299       case KS_alloc:           break;
300       case KS_free:            return;
301       case KS_alloc_then_free: break;
302       case KS_alloc_and_free:  break;
303       default: tl_assert (0);
304    }
305    mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
306 }
307 
308 void  MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
309 {
310    UInt pos;
311    switch (MC_(clo_keep_stacktraces)) {
312       case KS_none:            return;
313       case KS_alloc:           return;
314       case KS_free:            pos = 0; break;
315       case KS_alloc_then_free: pos = 0; break;
316       case KS_alloc_and_free:  pos = 1; break;
317       default: tl_assert (0);
318    }
319    mc->where[pos] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
320 }
321 
322 UInt MC_(n_where_pointers) (void)
323 {
324    switch (MC_(clo_keep_stacktraces)) {
325       case KS_none:            return 0;
326       case KS_alloc:
327       case KS_free:
328       case KS_alloc_then_free: return 1;
329       case KS_alloc_and_free:  return 2;
330       default: tl_assert (0);
331    }
332 }
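/* Summary of the MC_(clo_keep_stacktraces) settings handled above
   (selected with Memcheck's --keep-stacktraces= option):
      KS_none            : record nothing,                0 'where' pointers
      KS_alloc           : allocation stack only,         1 'where' pointer
      KS_free            : free stack only,               1 'where' pointer
      KS_alloc_then_free : alloc stack, overwritten by
                           the free stack at free time,   1 'where' pointer
      KS_alloc_and_free  : both stacks kept separately,   2 'where' pointers */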
333 
334 /*------------------------------------------------------------*/
335 /*--- client_malloc(), etc                                 ---*/
336 /*------------------------------------------------------------*/
337 
338 // XXX: should make this a proper error (bug #79311).
339 static Bool complain_about_silly_args(SizeT sizeB, const HChar* fn)
340 {
341    // Cast to a signed type to catch any unexpectedly negative args.  We're
342    // assuming here that the size asked for is not greater than 2^31 bytes
343    // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
344    if ((SSizeT)sizeB < 0) {
345       if (!VG_(clo_xml))
346          VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
347                       (SSizeT)sizeB, fn );
348       return True;
349    }
350    return False;
351 }
352 
353 static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
354 {
355    if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
356       if (!VG_(clo_xml))
357          VG_(message)(Vg_UserMsg,
358                       "Warning: silly args (%ld,%ld) to calloc()\n",
359                       (SSizeT)n, (SSizeT)sizeB);
360       return True;
361    }
362    return False;
363 }
364 
365 /* Allocate memory and note change in memory available */
366 void* MC_(new_block) ( ThreadId tid,
367                        Addr p, SizeT szB, SizeT alignB,
368                        Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
369 {
370    MC_Chunk* mc;
371 
372    // Allocate and zero if necessary
373    if (p) {
374       tl_assert(MC_AllocCustom == kind);
375    } else {
376       tl_assert(MC_AllocCustom != kind);
377       p = (Addr)VG_(cli_malloc)( alignB, szB );
378       if (!p) {
379          return NULL;
380       }
381       if (is_zeroed) {
382          VG_(memset)((void*)p, 0, szB);
383       } else
384       if (MC_(clo_malloc_fill) != -1) {
385          tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
386          VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
387       }
388    }
389 
390    // Only update stats if allocation succeeded.
391    cmalloc_n_mallocs ++;
392    cmalloc_bs_mallocd += (ULong)szB;
393    mc = create_MC_Chunk (tid, p, szB, kind);
394    VG_(HT_add_node)( table, mc );
395 
396    if (is_zeroed)
397       MC_(make_mem_defined)( p, szB );
398    else {
399       UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
400       tl_assert(VG_(is_plausible_ECU)(ecu));
401       MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
402    }
403 
404    return (void*)p;
405 }
406 
407 void* MC_(malloc) ( ThreadId tid, SizeT n )
408 {
409    if (complain_about_silly_args(n, "malloc")) {
410       return NULL;
411    } else {
412       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
413          /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
414    }
415 }
416 
417 void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
418 {
419    if (complain_about_silly_args(n, "__builtin_new")) {
420       return NULL;
421    } else {
422       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
423          /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
424    }
425 }
426 
427 void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
428 {
429    if (complain_about_silly_args(n, "__builtin_vec_new")) {
430       return NULL;
431    } else {
432       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
433          /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
434    }
435 }
436 
437 void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
438 {
439    if (complain_about_silly_args(n, "memalign")) {
440       return NULL;
441    } else {
442       return MC_(new_block) ( tid, 0, n, alignB,
443          /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
444    }
445 }
446 
447 void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
448 {
449    if (complain_about_silly_args2(nmemb, size1)) {
450       return NULL;
451    } else {
452       return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
453          /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
454    }
455 }
456 
457 static
458 void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
459 {
460    /* Note: we do not free-fill the custom allocs produced
461       by MEMPOOL or by MALLOCLIKE/FREELIKE_BLOCK requests. */
462    if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
463       tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
464       VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
465    }
466 
467    /* Note: make redzones noaccess again -- just in case the user made them
468       accessible with a client request... */
469    MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );
470 
471    /* Record where freed */
472    MC_(set_freed_at) (tid, mc);
473    /* Put it out of harm's way for a while */
474    add_to_freed_queue ( mc );
475    /* If the free list volume is bigger than MC_(clo_freelist_vol),
476       we wait till the next block allocation to release blocks.
477       This increases the chance of discovering dangling pointer usage,
478       even for big blocks being freed by the client. */
479 }
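/* Illustrative note (not from the original file): MC_(clo_malloc_fill) and
   MC_(clo_free_fill), used by MC_(new_block), die_and_free_mem and
   MC_(realloc), hold the byte patterns given with Memcheck's
   --malloc-fill=<hexnumber> and --free-fill=<hexnumber> options; a value
   of -1 means "no fill requested". */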
480 
481 
482 static
483 void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
484 {
485    /* MC_(record_freemismatch_error) reports errors for still-allocated
486       blocks, but here we are in the middle of freeing the block.  To
487       report the error correctly, we re-insert the chunk (making it
488       again a "clean allocated block"), report the error, and then
489       re-remove the chunk.  This avoids doing a VG_(HT_lookup)
490       followed by a VG_(HT_remove) in all non-erroneous cases. */
491    VG_(HT_add_node)( MC_(malloc_list), mc );
492    MC_(record_freemismatch_error) ( tid, mc );
493    if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
494       tl_assert(0);
495 }
496 
497 void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
498 {
499    MC_Chunk* mc;
500 
501    cmalloc_n_frees++;
502 
503    mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
504    if (mc == NULL) {
505       MC_(record_free_error) ( tid, p );
506    } else {
507       /* check if it is a matching free() / delete / delete [] */
508       if (kind != mc->allockind) {
509          tl_assert(p == mc->data);
510          record_freemismatch_error ( tid, mc );
511       }
512       die_and_free_mem ( tid, mc, rzB );
513    }
514 }
515 
516 void MC_(free) ( ThreadId tid, void* p )
517 {
518    MC_(handle_free)(
519       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
520 }
521 
522 void MC_(__builtin_delete) ( ThreadId tid, void* p )
523 {
524    MC_(handle_free)(
525       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
526 }
527 
528 void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
529 {
530    MC_(handle_free)(
531       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
532 }
533 
534 void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
535 {
536    MC_Chunk* old_mc;
537    MC_Chunk* new_mc;
538    Addr      a_new;
539    SizeT     old_szB;
540 
541    if (complain_about_silly_args(new_szB, "realloc"))
542       return NULL;
543 
544    cmalloc_n_frees ++;
545    cmalloc_n_mallocs ++;
546    cmalloc_bs_mallocd += (ULong)new_szB;
547 
548    /* Remove the old block */
549    old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
550    if (old_mc == NULL) {
551       MC_(record_free_error) ( tid, (Addr)p_old );
552       /* We return to the program regardless. */
553       return NULL;
554    }
555 
556    /* check if it's a matching free() / delete / delete [] */
557    if (MC_AllocMalloc != old_mc->allockind) {
558       /* cannot realloc a range that was allocated with new or new [] */
559       tl_assert((Addr)p_old == old_mc->data);
560       record_freemismatch_error ( tid, old_mc );
561       /* but keep going anyway */
562    }
563 
564    old_szB = old_mc->szB;
565 
566    /* Get new memory */
567    a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);
568 
569    if (a_new) {
570       /* In all cases, even when the new size is smaller or unchanged, we
571          reallocate and copy the contents, and make the old block
572          inaccessible.  This is so as to guarantee to catch all cases of
573          accesses via the old address after reallocation, regardless of
574          the change in size.  (Of course the ability to detect accesses
575          to the old block also depends on the size of the freed blocks
576          queue). */
577 
578       // Allocate a new chunk.
579       new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );
580 
581       // Now insert the new mc (with a new 'data' field) into malloc_list.
582       VG_(HT_add_node)( MC_(malloc_list), new_mc );
583 
584       /* Retained part is copied, red zones set as normal */
585 
586       /* Redzone at the front */
587       MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
588                               MC_(Malloc_Redzone_SzB) );
589 
590       /* payload */
591       if (old_szB >= new_szB) {
592          /* new size is smaller or the same */
593 
594          /* Copy address range state and value from old to new */
595          MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
596          VG_(memcpy)((void*)a_new, p_old, new_szB);
597       } else {
598          /* new size is bigger */
599          UInt        ecu;
600 
601          /* Copy address range state and value from old to new */
602          MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
603          VG_(memcpy)((void*)a_new, p_old, old_szB);
604 
605          // If the block has grown, we mark the grown area as undefined.
606          // We have to do that after VG_(HT_add_node) to ensure the ecu
607          // execontext is for a fully allocated block.
608          ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
609          tl_assert(VG_(is_plausible_ECU)(ecu));
610          MC_(make_mem_undefined_w_otag)( a_new+old_szB,
611                                          new_szB-old_szB,
612                                          ecu | MC_OKIND_HEAP );
613 
614          /* Possibly fill new area with specified junk */
615          if (MC_(clo_malloc_fill) != -1) {
616             tl_assert(MC_(clo_malloc_fill) >= 0x00
617                       && MC_(clo_malloc_fill) <= 0xFF);
618             VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
619                                                 new_szB-old_szB);
620          }
621       }
622 
623       /* Redzone at the back. */
624       MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));
625 
626       /* Possibly fill freed area with specified junk. */
627       if (MC_(clo_free_fill) != -1) {
628          tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
629          VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
630       }
631 
632       /* Free old memory */
633       /* Nb: we have to allocate a new MC_Chunk for the new memory rather
634          than recycling the old one, so that any erroneous accesses to the
635          old memory are reported. */
636       die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );
637 
638    } else {
639       /* Could not allocate new client memory.
640          Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
641          unconditionally removed at the beginning of the function. */
642       VG_(HT_add_node)( MC_(malloc_list), old_mc );
643    }
644 
645    return (void*)a_new;
646 }
647 
648 SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
649 {
650    MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
651 
652    // There may be slop, but pretend there isn't because only the asked-for
653    // area will be marked as addressable.
654    return ( mc ? mc->szB : 0 );
655 }
656 
657 /* This handles the in place resize of a block, as performed by the
658    VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
659    and not used for, handling of the normal libc realloc()
660    function. */
661 void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
662                                SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
663 {
664    MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
665    if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
666       /* Reject if: p is not found, or oldSizeB is wrong,
667          or new block would be empty. */
668       MC_(record_free_error) ( tid, p );
669       return;
670    }
671 
672    if (oldSizeB == newSizeB)
673       return;
674 
675    mc->szB = newSizeB;
676    if (newSizeB < oldSizeB) {
677       MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
678    } else {
679       ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
680       UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
681       MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
682                                       ecu | MC_OKIND_HEAP );
683       if (rzB > 0)
684          MC_(make_mem_noaccess)( p + newSizeB, rzB );
685    }
686 }
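/* Illustrative client-side sketch (an editorial addition, not part of the
   original file): the wrappers above are also reached through the client
   requests declared in <valgrind/valgrind.h>, which lets a custom allocator
   describe its blocks to Memcheck.  my_arena_alloc/my_arena_free are
   hypothetical helpers standing in for the real arena code.

   #include <stddef.h>
   #include <valgrind/valgrind.h>

   void* my_malloc(size_t n)
   {
      void* p = my_arena_alloc(n);            // carve n bytes from the arena
      // Describe the block: no redzones, contents not zeroed.
      VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);  // handled via MC_(new_block)
      return p;
   }

   void my_free(void* p)
   {
      VALGRIND_FREELIKE_BLOCK(p, 0);          // handled via MC_(handle_free)
      my_arena_free(p);
   }

   void my_shrink(void* p, size_t old_n, size_t new_n)
   {
      // Resize without moving the payload.
      VALGRIND_RESIZEINPLACE_BLOCK(p, old_n, new_n, 0);
                                      // handled via MC_(handle_resizeInPlace)
   }
*/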
687 
688 
689 /*------------------------------------------------------------*/
690 /*--- Memory pool stuff.                                   ---*/
691 /*------------------------------------------------------------*/
692 
693 /* Set to 1 for intensive sanity checking.  It is very expensive, though,
694    and should not be used in production scenarios.  See #255966. */
695 #define MP_DETAILED_SANITY_CHECKS 0
696 
697 static void check_mempool_sane(MC_Mempool* mp); /*forward*/
698 
699 
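/* Illustrative client-side sketch (an editorial addition, not part of the
   original file): the mempool functions below implement the
   VALGRIND_*_MEMPOOL client requests from <valgrind/valgrind.h>
   (<valgrind/memcheck.h> pulls that in and adds the MAKE_MEM_* requests).
   superblock_new/carve_from are hypothetical helpers standing in for the
   real pool code.

   #include <stddef.h>
   #include <valgrind/memcheck.h>

   char* pool_create(size_t sb_size)
   {
      char* sb = superblock_new(sb_size);     // grab one big superblock
      // Hide the raw superblock from the client, then register the pool,
      // using the superblock address as the pool handle.
      VALGRIND_MAKE_MEM_NOACCESS(sb, sb_size);
      VALGRIND_CREATE_MEMPOOL(sb, 0, 0);      // -> MC_(create_mempool)
      return sb;
   }

   void* pool_alloc(char* sb, size_t n)
   {
      void* p = carve_from(sb, n);
      VALGRIND_MEMPOOL_ALLOC(sb, p, n);       // -> MC_(mempool_alloc)
      return p;
   }

   void pool_free(char* sb, void* p)
   {
      VALGRIND_MEMPOOL_FREE(sb, p);           // -> MC_(mempool_free)
   }

   void pool_destroy(char* sb)
   {
      VALGRIND_DESTROY_MEMPOOL(sb);           // -> MC_(destroy_mempool)
   }
*/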
700 void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
701 {
702    MC_Mempool* mp;
703 
704    if (VG_(clo_verbosity) > 2) {
705       VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
706                                pool, rzB, is_zeroed);
707       VG_(get_and_pp_StackTrace)
708          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
709    }
710 
711    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
712    if (mp != NULL) {
713      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
714    }
715 
716    mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
717    mp->pool       = pool;
718    mp->rzB        = rzB;
719    mp->is_zeroed  = is_zeroed;
720    mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
721    check_mempool_sane(mp);
722 
723    /* Paranoia ... ensure this area is off-limits to the client, so
724       the mp->pool field isn't visible to the leak checker.  If memory
725       management is working correctly, any pointer returned by
726       VG_(malloc) should be noaccess as far as the client is
727       concerned. */
728    if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
729       VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
730    }
731 
732    VG_(HT_add_node)( MC_(mempool_list), mp );
733 }
734 
735 void MC_(destroy_mempool)(Addr pool)
736 {
737    MC_Chunk*   mc;
738    MC_Mempool* mp;
739 
740    if (VG_(clo_verbosity) > 2) {
741       VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
742       VG_(get_and_pp_StackTrace)
743          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
744    }
745 
746    mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );
747 
748    if (mp == NULL) {
749       ThreadId tid = VG_(get_running_tid)();
750       MC_(record_illegal_mempool_error) ( tid, pool );
751       return;
752    }
753    check_mempool_sane(mp);
754 
755    // Clean up the chunks, one by one
756    VG_(HT_ResetIter)(mp->chunks);
757    while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
758       /* Note: make redzones noaccess again -- just in case the user made them
759          accessible with a client request... */
760       MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
761    }
762    // Destroy the chunk table
763    VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);
764 
765    VG_(free)(mp);
766 }
767 
768 static Int
769 mp_compar(const void* n1, const void* n2)
770 {
771    const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
772    const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
773    if (mc1->data < mc2->data) return -1;
774    if (mc1->data > mc2->data) return  1;
775    return 0;
776 }
777 
778 static void
779 check_mempool_sane(MC_Mempool* mp)
780 {
781    UInt n_chunks, i, bad = 0;
782    static UInt tick = 0;
783 
784    MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
785    if (!chunks)
786       return;
787 
788    if (VG_(clo_verbosity) > 1) {
789      if (tick++ >= 10000)
790        {
791 	 UInt total_pools = 0, total_chunks = 0;
792 	 MC_Mempool* mp2;
793 
794 	 VG_(HT_ResetIter)(MC_(mempool_list));
795 	 while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
796 	   total_pools++;
797 	   VG_(HT_ResetIter)(mp2->chunks);
798 	   while (VG_(HT_Next)(mp2->chunks)) {
799 	     total_chunks++;
800 	   }
801 	 }
802 
803          VG_(message)(Vg_UserMsg,
804                       "Total mempools active: %d pools, %d chunks\n",
805 		      total_pools, total_chunks);
806 	 tick = 0;
807        }
808    }
809 
810 
811    VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);
812 
813    /* Sanity check; assert that the blocks are now in order */
814    for (i = 0; i < n_chunks-1; i++) {
815       if (chunks[i]->data > chunks[i+1]->data) {
816          VG_(message)(Vg_UserMsg,
817                       "Mempool chunk %d / %d is out of order "
818                       "wrt. its successor\n",
819                       i+1, n_chunks);
820          bad = 1;
821       }
822    }
823 
824    /* Sanity check -- make sure they don't overlap */
825    for (i = 0; i < n_chunks-1; i++) {
826       if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
827          VG_(message)(Vg_UserMsg,
828                       "Mempool chunk %d / %d overlaps with its successor\n",
829                       i+1, n_chunks);
830          bad = 1;
831       }
832    }
833 
834    if (bad) {
835          VG_(message)(Vg_UserMsg,
836                 "Bad mempool (%d chunks), dumping chunks for inspection:\n",
837                 n_chunks);
838          for (i = 0; i < n_chunks; ++i) {
839             VG_(message)(Vg_UserMsg,
840                          "Mempool chunk %d / %d: %ld bytes "
841                          "[%lx,%lx), allocated:\n",
842                          i+1,
843                          n_chunks,
844                          chunks[i]->szB + 0UL,
845                          chunks[i]->data,
846                          chunks[i]->data + chunks[i]->szB);
847 
848             VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
849          }
850    }
851    VG_(free)(chunks);
852 }
853 
854 void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
855 {
856    MC_Mempool* mp;
857 
858    if (VG_(clo_verbosity) > 2) {
859       VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
860                                pool, addr, szB);
861       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
862    }
863 
864    mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
865    if (mp == NULL) {
866       MC_(record_illegal_mempool_error) ( tid, pool );
867    } else {
868       if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
869       MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
870                      MC_AllocCustom, mp->chunks);
871       if (mp->rzB > 0) {
872          // This is not needed if the user application has properly
873          // marked the superblock noaccess when defining the mempool.
874          // We nevertheless mark the redzones noaccess, to catch
875          // some bugs if the user forgot.
876          MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
877          MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
878       }
879       if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
880    }
881 }
882 
883 void MC_(mempool_free)(Addr pool, Addr addr)
884 {
885    MC_Mempool*  mp;
886    MC_Chunk*    mc;
887    ThreadId     tid = VG_(get_running_tid)();
888 
889    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
890    if (mp == NULL) {
891       MC_(record_illegal_mempool_error)(tid, pool);
892       return;
893    }
894 
895    if (VG_(clo_verbosity) > 2) {
896       VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
897       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
898    }
899 
900    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
901    mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
902    if (mc == NULL) {
903       MC_(record_free_error)(tid, (Addr)addr);
904       return;
905    }
906 
907    if (VG_(clo_verbosity) > 2) {
908       VG_(message)(Vg_UserMsg,
909 		   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
910 		   pool, addr, mc->szB + 0UL);
911    }
912 
913    die_and_free_mem ( tid, mc, mp->rzB );
914    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
915 }
916 
917 
918 void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
919 {
920    MC_Mempool*  mp;
921    MC_Chunk*    mc;
922    ThreadId     tid = VG_(get_running_tid)();
923    UInt         n_shadows, i;
924    VgHashNode** chunks;
925 
926    if (VG_(clo_verbosity) > 2) {
927       VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
928                                pool, addr, szB);
929       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
930    }
931 
932    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
933    if (mp == NULL) {
934       MC_(record_illegal_mempool_error)(tid, pool);
935       return;
936    }
937 
938    check_mempool_sane(mp);
939    chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
940    if (n_shadows == 0) {
941      tl_assert(chunks == NULL);
942      return;
943    }
944 
945    tl_assert(chunks != NULL);
946    for (i = 0; i < n_shadows; ++i) {
947 
948       Addr lo, hi, min, max;
949 
950       mc = (MC_Chunk*) chunks[i];
951 
952       lo = mc->data;
953       hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
954 
955 #define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
956 
957       if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
958 
959          /* The current chunk is entirely within the trim extent: keep
960             it. */
961 
962          continue;
963 
964       } else if ( (! EXTENT_CONTAINS(lo)) &&
965                   (! EXTENT_CONTAINS(hi)) ) {
966 
967          /* The current chunk is entirely outside the trim extent:
968             delete it. */
969 
970          if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
971             MC_(record_free_error)(tid, (Addr)mc->data);
972             VG_(free)(chunks);
973             if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
974             return;
975          }
976          die_and_free_mem ( tid, mc, mp->rzB );
977 
978       } else {
979 
980          /* The current chunk intersects the trim extent: remove,
981             trim, and reinsert it. */
982 
983          tl_assert(EXTENT_CONTAINS(lo) ||
984                    EXTENT_CONTAINS(hi));
985          if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
986             MC_(record_free_error)(tid, (Addr)mc->data);
987             VG_(free)(chunks);
988             if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
989             return;
990          }
991 
992          if (mc->data < addr) {
993            min = mc->data;
994            lo = addr;
995          } else {
996            min = addr;
997            lo = mc->data;
998          }
999 
1000          if (mc->data + mc->szB > addr + szB) {
1001            max = mc->data + mc->szB;
1002            hi = addr + szB;
1003          } else {
1004            max = addr + szB;
1005            hi = mc->data + mc->szB;
1006          }
1007 
1008          tl_assert(min <= lo);
1009          tl_assert(lo < hi);
1010          tl_assert(hi <= max);
1011 
1012          if (min < lo && !EXTENT_CONTAINS(min)) {
1013            MC_(make_mem_noaccess)( min, lo - min);
1014          }
1015 
1016          if (hi < max && !EXTENT_CONTAINS(max)) {
1017            MC_(make_mem_noaccess)( hi, max - hi );
1018          }
1019 
1020          mc->data = lo;
1021          mc->szB = (UInt) (hi - lo);
1022          VG_(HT_add_node)( mp->chunks, mc );
1023       }
1024 
1025 #undef EXTENT_CONTAINS
1026 
1027    }
1028    check_mempool_sane(mp);
1029    VG_(free)(chunks);
1030 }
1031 
1032 void MC_(move_mempool)(Addr poolA, Addr poolB)
1033 {
1034    MC_Mempool* mp;
1035 
1036    if (VG_(clo_verbosity) > 2) {
1037       VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
1038       VG_(get_and_pp_StackTrace)
1039          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1040    }
1041 
1042    mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );
1043 
1044    if (mp == NULL) {
1045       ThreadId tid = VG_(get_running_tid)();
1046       MC_(record_illegal_mempool_error) ( tid, poolA );
1047       return;
1048    }
1049 
1050    mp->pool = poolB;
1051    VG_(HT_add_node)( MC_(mempool_list), mp );
1052 }
1053 
1054 void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
1055 {
1056    MC_Mempool*  mp;
1057    MC_Chunk*    mc;
1058    ThreadId     tid = VG_(get_running_tid)();
1059 
1060    if (VG_(clo_verbosity) > 2) {
1061       VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
1062                    pool, addrA, addrB, szB);
1063       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1064    }
1065 
1066    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1067    if (mp == NULL) {
1068       MC_(record_illegal_mempool_error)(tid, pool);
1069       return;
1070    }
1071 
1072    check_mempool_sane(mp);
1073 
1074    mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
1075    if (mc == NULL) {
1076       MC_(record_free_error)(tid, (Addr)addrA);
1077       return;
1078    }
1079 
1080    mc->data = addrB;
1081    mc->szB  = szB;
1082    VG_(HT_add_node)( mp->chunks, mc );
1083 
1084    check_mempool_sane(mp);
1085 }
1086 
1087 Bool MC_(mempool_exists)(Addr pool)
1088 {
1089    MC_Mempool*  mp;
1090 
1091    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1092    if (mp == NULL) {
1093        return False;
1094    }
1095    return True;
1096 }
1097 
1098 
1099 /*------------------------------------------------------------*/
1100 /*--- Statistics printing                                  ---*/
1101 /*------------------------------------------------------------*/
1102 
1103 void MC_(print_malloc_stats) ( void )
1104 {
1105    MC_Chunk* mc;
1106    SizeT     nblocks = 0;
1107    ULong     nbytes  = 0;
1108 
1109    if (VG_(clo_verbosity) == 0)
1110       return;
1111    if (VG_(clo_xml))
1112       return;
1113 
1114    /* Count memory still in use. */
1115    VG_(HT_ResetIter)(MC_(malloc_list));
1116    while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1117       nblocks++;
1118       nbytes += (ULong)mc->szB;
1119    }
1120 
1121    VG_(umsg)(
1122       "HEAP SUMMARY:\n"
1123       "    in use at exit: %'llu bytes in %'lu blocks\n"
1124       "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
1125       "\n",
1126       nbytes, nblocks,
1127       cmalloc_n_mallocs,
1128       cmalloc_n_frees, cmalloc_bs_mallocd
1129    );
1130 }
1131 
1132 SizeT MC_(get_cmalloc_n_frees) ( void )
1133 {
1134    return cmalloc_n_frees;
1135 }
1136 
1137 
1138 /*--------------------------------------------------------------------*/
1139 /*--- end                                                          ---*/
1140 /*--------------------------------------------------------------------*/
1141