1 
2 /*--------------------------------------------------------------------*/
3 /*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4 /*---                                         mc_malloc_wrappers.c ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of MemCheck, a heavyweight Valgrind tool for
9    detecting memory errors.
10 
11    Copyright (C) 2000-2015 Julian Seward
12       jseward@acm.org
13 
14    This program is free software; you can redistribute it and/or
15    modify it under the terms of the GNU General Public License as
16    published by the Free Software Foundation; either version 2 of the
17    License, or (at your option) any later version.
18 
19    This program is distributed in the hope that it will be useful, but
20    WITHOUT ANY WARRANTY; without even the implied warranty of
21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22    General Public License for more details.
23 
24    You should have received a copy of the GNU General Public License
25    along with this program; if not, write to the Free Software
26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27    02111-1307, USA.
28 
29    The GNU General Public License is contained in the file COPYING.
30 */
31 
32 #include "pub_tool_basics.h"
33 #include "pub_tool_execontext.h"
34 #include "pub_tool_poolalloc.h"
35 #include "pub_tool_hashtable.h"
36 #include "pub_tool_libcbase.h"
37 #include "pub_tool_libcassert.h"
38 #include "pub_tool_libcprint.h"
39 #include "pub_tool_mallocfree.h"
40 #include "pub_tool_options.h"
41 #include "pub_tool_replacemalloc.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_tooliface.h"     // Needed for mc_include.h
44 #include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
45 
46 #include "mc_include.h"
47 
48 /*------------------------------------------------------------*/
49 /*--- Defns                                                ---*/
50 /*------------------------------------------------------------*/
51 
52 /* Stats ... */
53 static SizeT cmalloc_n_mallocs  = 0;
54 static SizeT cmalloc_n_frees    = 0;
55 static ULong cmalloc_bs_mallocd = 0;
56 
57 /* For debug printing to do with mempools: what stack trace
58    depth to show. */
59 #define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
60 
61 
62 /*------------------------------------------------------------*/
63 /*--- Tracking malloc'd and free'd blocks                  ---*/
64 /*------------------------------------------------------------*/
65 
66 SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB
67 
68 /* Record malloc'd blocks. */
69 VgHashTable *MC_(malloc_list) = NULL;
70 
71 /* Memory pools: a hash table of MC_Mempools.  Search key is
72    MC_Mempool::pool. */
73 VgHashTable *MC_(mempool_list) = NULL;
74 
75 /* Pool allocator for MC_Chunk. */
76 PoolAlloc *MC_(chunk_poolalloc) = NULL;
77 static
78 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
79                             MC_AllocKind kind);
80 static inline
81 void delete_MC_Chunk (MC_Chunk* mc);
82 
83 /* Records blocks after freeing. */
84 /* Blocks freed by the client are queued in one of two lists of
85    freed blocks not yet physically freed:
86    "big blocks" freed list.
87    "small blocks" freed list
88    The blocks with a size >= MC_(clo_freelist_big_blocks)
89    are linked in the big blocks freed list.
90    This allows a client to allocate and free big blocks
91    (e.g. bigger than MC_(clo_freelist_vol)) without immediately
92    losing all protection against dangling pointers.
93    position [0] is for big blocks, [1] is for small blocks. */
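/* The queue volume cap MC_(clo_freelist_vol) and the big/small split point
   MC_(clo_freelist_big_blocks) are set from Memcheck's --freelist-vol and
   --freelist-big-blocks command line options; see the Memcheck manual for
   the exact semantics and defaults. */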
94 static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
95 static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
96 
97 /* Put a shadow chunk on the freed blocks queue, possibly freeing up
98    some of the oldest blocks in the queue at the same time. */
99 static void add_to_freed_queue ( MC_Chunk* mc )
100 {
101    const Bool show = False;
102    const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);
103 
104    /* Put it at the end of the freed list, unless the block
105       would be directly released anyway: in this case, we
106       put it at the head of the freed list. */
107    if (freed_list_end[l] == NULL) {
108       tl_assert(freed_list_start[l] == NULL);
109       mc->next = NULL;
110       freed_list_end[l]    = freed_list_start[l] = mc;
111    } else {
112       tl_assert(freed_list_end[l]->next == NULL);
113       if (mc->szB >= MC_(clo_freelist_vol)) {
114          mc->next = freed_list_start[l];
115          freed_list_start[l] = mc;
116       } else {
117          mc->next = NULL;
118          freed_list_end[l]->next = mc;
119          freed_list_end[l]       = mc;
120       }
121    }
122    VG_(free_queue_volume) += (Long)mc->szB;
123    if (show)
124       VG_(printf)("mc_freelist: acquire: volume now %lld\n",
125                   VG_(free_queue_volume));
126    VG_(free_queue_length)++;
127 }
128 
129 /* Release enough of the oldest blocks to bring the free queue
130    volume below MC_(clo_freelist_vol).
131    Start with big block list first.
132    On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
133    On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
134 static void release_oldest_block(void)
135 {
136    const Bool show = False;
137    int i;
138    tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
139    tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);
140 
141    for (i = 0; i < 2; i++) {
142       while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
143              && freed_list_start[i] != NULL) {
144          MC_Chunk* mc1;
145 
146          tl_assert(freed_list_end[i] != NULL);
147 
148          mc1 = freed_list_start[i];
149          VG_(free_queue_volume) -= (Long)mc1->szB;
150          VG_(free_queue_length)--;
151          if (show)
152             VG_(printf)("mc_freelist: discard: volume now %lld\n",
153                         VG_(free_queue_volume));
154          tl_assert(VG_(free_queue_volume) >= 0);
155 
156          if (freed_list_start[i] == freed_list_end[i]) {
157             freed_list_start[i] = freed_list_end[i] = NULL;
158          } else {
159             freed_list_start[i] = mc1->next;
160          }
161          mc1->next = NULL; /* just paranoia */
162 
163          /* free MC_Chunk */
164          if (MC_AllocCustom != mc1->allockind)
165             VG_(cli_free) ( (void*)(mc1->data) );
166          delete_MC_Chunk ( mc1 );
167       }
168    }
169 }
170 
171 MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
172 {
173    int i;
174    for (i = 0; i < 2; i++) {
175       MC_Chunk*  mc;
176       mc = freed_list_start[i];
177       while (mc) {
178          if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
179                                     MC_(Malloc_Redzone_SzB) ))
180             return mc;
181          mc = mc->next;
182       }
183    }
184    return NULL;
185 }
186 
187 /* Allocate a shadow chunk, put it on the appropriate list.
188    If needed, release oldest blocks from freed list. */
189 static
190 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
191                             MC_AllocKind kind)
192 {
193    MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
194    mc->data      = p;
195    mc->szB       = szB;
196    mc->allockind = kind;
197    switch ( MC_(n_where_pointers)() ) {
198       case 2: mc->where[1] = 0; // fall through to case 1
199       case 1: mc->where[0] = 0; // fall through to case 0
200       case 0: break;
201       default: tl_assert(0);
202    }
203    MC_(set_allocated_at) (tid, mc);
204 
205    /* Each time a new MC_Chunk is created, release oldest blocks
206       if the free list volume is exceeded. */
207    if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
208       release_oldest_block();
209 
210    /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
211       the mc->data field isn't visible to the leak checker.  If memory
212       management is working correctly, any pointer returned by VG_(malloc)
213       should be noaccess as far as the client is concerned. */
214    if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
215       VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
216    }
217    return mc;
218 }
219 
220 static inline
221 void delete_MC_Chunk (MC_Chunk* mc)
222 {
223    VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
224 }
225 
226 // True if mc is in the given block list.
227 static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
228 {
229    MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
230    if (found_mc) {
231       tl_assert (found_mc->data == mc->data);
232       /* If a user builds a pool from a malloc-ed superblock
233          and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
234          an address at the beginning of this superblock, then
235          this address will appear twice in the block_list.
236          We handle this case by checking size and allockind.
237          Note: I suspect that having the same block
238          twice in MC_(malloc_list) is a recipe for bugs.
239          It might be better to create a "standard" mempool to
240          handle all this more cleanly. */
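      /* Illustrative client-side sketch of that scenario (macro as declared
         in valgrind.h; names are placeholders):

            char* sb = malloc(1000);                   // superblock, in MC_(malloc_list)
            VALGRIND_MALLOCLIKE_BLOCK(sb, 100, 0, 0);  // same address, MC_AllocCustom

         Both registrations share the address sb, so a lookup by address
         alone is ambiguous; hence the size/allockind check below. */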
241       if (found_mc->szB != mc->szB
242           || found_mc->allockind != mc->allockind)
243          return False;
244       tl_assert (found_mc == mc);
245       return True;
246    } else
247       return False;
248 }
249 
250 // True if mc is a live block (not yet freed).
251 static Bool live_block (MC_Chunk* mc)
252 {
253    if (mc->allockind == MC_AllocCustom) {
254       MC_Mempool* mp;
255       VG_(HT_ResetIter)(MC_(mempool_list));
256       while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
257          if ( in_block_list (mp->chunks, mc) )
258             return True;
259       }
260    }
261    /* Note: we fall back here for an MC_AllocCustom block that was not
262       found above, as such a block can be inserted in MC_(malloc_list)
263       by VALGRIND_MALLOCLIKE_BLOCK. */
264    return in_block_list ( MC_(malloc_list), mc );
265 }
266 
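/* How the where[] slots are used for each --keep-stacktraces setting
   (MC_(clo_keep_stacktraces)), as implemented by the functions below:
     KS_none:            no stack traces recorded.
     KS_alloc:           where[0] = allocation stack.
     KS_free:            where[0] = free stack.
     KS_alloc_then_free: where[0] = allocation stack, overwritten by the
                         free stack when the block is freed.
     KS_alloc_and_free:  where[0] = allocation stack, where[1] = free stack. */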
267 ExeContext* MC_(allocated_at) (MC_Chunk* mc)
268 {
269    switch (MC_(clo_keep_stacktraces)) {
270       case KS_none:            return VG_(null_ExeContext) ();
271       case KS_alloc:           return mc->where[0];
272       case KS_free:            return VG_(null_ExeContext) ();
273       case KS_alloc_then_free: return (live_block(mc) ?
274                                        mc->where[0] : VG_(null_ExeContext) ());
275       case KS_alloc_and_free:  return mc->where[0];
276       default: tl_assert (0);
277    }
278 }
279 
280 ExeContext* MC_(freed_at) (MC_Chunk* mc)
281 {
282    switch (MC_(clo_keep_stacktraces)) {
283       case KS_none:            return VG_(null_ExeContext) ();
284       case KS_alloc:           return VG_(null_ExeContext) ();
285       case KS_free:            return (mc->where[0] ?
286                                        mc->where[0] : VG_(null_ExeContext) ());
287       case KS_alloc_then_free: return (live_block(mc) ?
288                                        VG_(null_ExeContext) () : mc->where[0]);
289       case KS_alloc_and_free:  return (mc->where[1] ?
290                                        mc->where[1] : VG_(null_ExeContext) ());
291       default: tl_assert (0);
292    }
293 }
294 
295 void  MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
296 {
297    switch (MC_(clo_keep_stacktraces)) {
298       case KS_none:            return;
299       case KS_alloc:           break;
300       case KS_free:            return;
301       case KS_alloc_then_free: break;
302       case KS_alloc_and_free:  break;
303       default: tl_assert (0);
304    }
305    mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
306 }
307 
308 void  MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
309 {
310    UInt pos;
311    switch (MC_(clo_keep_stacktraces)) {
312       case KS_none:            return;
313       case KS_alloc:           return;
314       case KS_free:            pos = 0; break;
315       case KS_alloc_then_free: pos = 0; break;
316       case KS_alloc_and_free:  pos = 1; break;
317       default: tl_assert (0);
318    }
319    mc->where[pos] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
320 }
321 
322 UInt MC_(n_where_pointers) (void)
323 {
324    switch (MC_(clo_keep_stacktraces)) {
325       case KS_none:            return 0;
326       case KS_alloc:
327       case KS_free:
328       case KS_alloc_then_free: return 1;
329       case KS_alloc_and_free:  return 2;
330       default: tl_assert (0);
331    }
332 }
333 
334 /*------------------------------------------------------------*/
335 /*--- client_malloc(), etc                                 ---*/
336 /*------------------------------------------------------------*/
337 
338 /* Allocate memory and note change in memory available */
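/* This is the common path both for the replaced allocator entry points
   below (malloc/new/new[]/memalign/calloc, which pass p == 0 so the client
   memory is obtained here via VG_(cli_malloc)) and for custom allocations
   registered via client requests such as VALGRIND_MALLOCLIKE_BLOCK or
   VALGRIND_MEMPOOL_ALLOC, which pass a non-zero p and MC_AllocCustom. */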
339 void* MC_(new_block) ( ThreadId tid,
340                        Addr p, SizeT szB, SizeT alignB,
341                        Bool is_zeroed, MC_AllocKind kind, VgHashTable *table)
342 {
343    MC_Chunk* mc;
344 
345    // Allocate and zero if necessary
346    if (p) {
347       tl_assert(MC_AllocCustom == kind);
348    } else {
349       tl_assert(MC_AllocCustom != kind);
350       p = (Addr)VG_(cli_malloc)( alignB, szB );
351       if (!p) {
352          return NULL;
353       }
354       if (is_zeroed) {
355          VG_(memset)((void*)p, 0, szB);
356       } else
357       if (MC_(clo_malloc_fill) != -1) {
358          tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
359          VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
360       }
361    }
362 
363    // Only update stats if allocation succeeded.
364    cmalloc_n_mallocs ++;
365    cmalloc_bs_mallocd += (ULong)szB;
366    mc = create_MC_Chunk (tid, p, szB, kind);
367    VG_(HT_add_node)( table, mc );
368 
369    if (is_zeroed)
370       MC_(make_mem_defined)( p, szB );
371    else {
372       UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
373       tl_assert(VG_(is_plausible_ECU)(ecu));
374       MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
375    }
376 
377    return (void*)p;
378 }
379 
380 void* MC_(malloc) ( ThreadId tid, SizeT n )
381 {
382    if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
383       return NULL;
384    } else {
385       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
386          /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
387    }
388 }
389 
390 void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
391 {
392    if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
393       return NULL;
394    } else {
395       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
396          /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
397    }
398 }
399 
400 void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
401 {
402    if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
403       return NULL;
404    } else {
405       return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
406          /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
407    }
408 }
409 
410 void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
411 {
412    if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
413       return NULL;
414    } else {
415       return MC_(new_block) ( tid, 0, n, alignB,
416          /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
417    }
418 }
419 
420 void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
421 {
422    if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
423        MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
424       return NULL;
425    } else {
426       return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
427          /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
428    }
429 }
430 
431 static
432 void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
433 {
434    /* Note: we do not free fill the custom allocs produced
435       by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
436    if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
437       tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
438       VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
439    }
440 
441    /* Note: make redzones noaccess again -- just in case user made them
442       accessible with a client request... */
443    MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );
444 
445    /* Record where freed */
446    MC_(set_freed_at) (tid, mc);
447    /* Put it out of harm's way for a while */
448    add_to_freed_queue ( mc );
449    /* If the free list volume is bigger than MC_(clo_freelist_vol),
450       we wait till the next block allocation to release blocks.
451       This increases the chance of discovering dangling pointer usage,
452       even for big blocks being freed by the client. */
453 }
454 
455 
456 static
457 void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
458 {
459    /* Only show such an error if the user hasn't disabled doing so. */
460    if (!MC_(clo_show_mismatched_frees))
461       return;
462 
463    /* MC_(record_freemismatch_error) reports errors for still-allocated
464       blocks, but we are in the middle of freeing this one.  To report
465       the error correctly, we re-insert the chunk (making it again a
466       "clean allocated block"), report the error, and then re-remove
467       the chunk.  This avoids doing a VG_(HT_lookup)
468       followed by a VG_(HT_remove) in all "non-erroneous cases". */
469    VG_(HT_add_node)( MC_(malloc_list), mc );
470    MC_(record_freemismatch_error) ( tid, mc );
471    if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
472       tl_assert(0);
473 }
474 
475 void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
476 {
477    MC_Chunk* mc;
478 
479    cmalloc_n_frees++;
480 
481    mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
482    if (mc == NULL) {
483       MC_(record_free_error) ( tid, p );
484    } else {
485       /* check if it is a matching free() / delete / delete [] */
486       if (kind != mc->allockind) {
487          tl_assert(p == mc->data);
488          record_freemismatch_error ( tid, mc );
489       }
490       die_and_free_mem ( tid, mc, rzB );
491    }
492 }
493 
494 void MC_(free) ( ThreadId tid, void* p )
495 {
496    MC_(handle_free)(
497       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
498 }
499 
500 void MC_(__builtin_delete) ( ThreadId tid, void* p )
501 {
502    MC_(handle_free)(
503       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
504 }
505 
506 void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
507 {
508    MC_(handle_free)(
509       tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
510 }
511 
512 void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
513 {
514    MC_Chunk* old_mc;
515    MC_Chunk* new_mc;
516    Addr      a_new;
517    SizeT     old_szB;
518 
519    if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
520       return NULL;
521 
522    cmalloc_n_frees ++;
523    cmalloc_n_mallocs ++;
524    cmalloc_bs_mallocd += (ULong)new_szB;
525 
526    /* Remove the old block */
527    old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
528    if (old_mc == NULL) {
529       MC_(record_free_error) ( tid, (Addr)p_old );
530       /* We return to the program regardless. */
531       return NULL;
532    }
533 
534    /* check if it's a matching free() / delete / delete [] */
535    if (MC_AllocMalloc != old_mc->allockind) {
536       /* can not realloc a range that was allocated with new or new [] */
537       tl_assert((Addr)p_old == old_mc->data);
538       record_freemismatch_error ( tid, old_mc );
539       /* but keep going anyway */
540    }
541 
542    old_szB = old_mc->szB;
543 
544    /* Get new memory */
545    a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);
546 
547    if (a_new) {
548       /* In all cases, even when the new size is smaller or unchanged, we
549          reallocate and copy the contents, and make the old block
550          inaccessible.  This is so as to guarantee to catch all cases of
551          accesses via the old address after reallocation, regardless of
552          the change in size.  (Of course the ability to detect accesses
553          to the old block also depends on the size of the freed blocks
554          queue). */
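      /* For example, client code along these lines

            int* q = p;                  // keep the old pointer
            p = realloc(p, new_size);    // always moves under Memcheck
            ... q[0] ...                 // reported as access to freed memory

         is caught, as long as the old block is still held in the
         freed-blocks queue (bounded by MC_(clo_freelist_vol)). */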
555 
556       // Allocate a new chunk.
557       new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );
558 
559       // Now insert the new mc (with a new 'data' field) into malloc_list.
560       VG_(HT_add_node)( MC_(malloc_list), new_mc );
561 
562       /* Retained part is copied, red zones set as normal */
563 
564       /* Redzone at the front */
565       MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
566                               MC_(Malloc_Redzone_SzB) );
567 
568       /* payload */
569       if (old_szB >= new_szB) {
570          /* new size is smaller or the same */
571 
572          /* Copy address range state and value from old to new */
573          MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
574          VG_(memcpy)((void*)a_new, p_old, new_szB);
575       } else {
576          /* new size is bigger */
577          UInt        ecu;
578 
579          /* Copy address range state and value from old to new */
580          MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
581          VG_(memcpy)((void*)a_new, p_old, old_szB);
582 
583          // If the block has grown, we mark the grown area as undefined.
584          // We have to do that after VG_(HT_add_node) to ensure the ecu
585          // execontext is for a fully allocated block.
586          ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
587          tl_assert(VG_(is_plausible_ECU)(ecu));
588          MC_(make_mem_undefined_w_otag)( a_new+old_szB,
589                                          new_szB-old_szB,
590                                          ecu | MC_OKIND_HEAP );
591 
592          /* Possibly fill new area with specified junk */
593          if (MC_(clo_malloc_fill) != -1) {
594             tl_assert(MC_(clo_malloc_fill) >= 0x00
595                       && MC_(clo_malloc_fill) <= 0xFF);
596             VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
597                                                 new_szB-old_szB);
598          }
599       }
600 
601       /* Redzone at the back. */
602       MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));
603 
604       /* Possibly fill freed area with specified junk. */
605       if (MC_(clo_free_fill) != -1) {
606          tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
607          VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
608       }
609 
610       /* Free old memory */
611       /* Nb: we have to allocate a new MC_Chunk for the new memory rather
612          than recycling the old one, so that any erroneous accesses to the
613          old memory are reported. */
614       die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );
615 
616    } else {
617       /* Could not allocate new client memory.
618          Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
619          unconditionally removed at the beginning of the function. */
620       VG_(HT_add_node)( MC_(malloc_list), old_mc );
621    }
622 
623    return (void*)a_new;
624 }
625 
626 SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
627 {
628    MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
629 
630    // There may be slop, but pretend there isn't because only the asked-for
631    // area will be marked as addressable.
632    return ( mc ? mc->szB : 0 );
633 }
634 
635 /* This handles the in place resize of a block, as performed by the
636    VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
637    and not used for, handling of the normal libc realloc()
638    function. */
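/* A minimal client-side sketch (request macro as declared in valgrind.h;
   arguments are start address, old size, new size, red zone size):

      VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB);

   oldSizeB must match the size Memcheck currently records for the block at
   addr, otherwise the request is rejected below with a free error. */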
639 void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
640                                SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
641 {
642    MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
643    if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
644       /* Reject if: p is not found, or oldSizeB is wrong,
645          or new block would be empty. */
646       MC_(record_free_error) ( tid, p );
647       return;
648    }
649 
650    if (oldSizeB == newSizeB)
651       return;
652 
653    mc->szB = newSizeB;
654    if (newSizeB < oldSizeB) {
655       MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
656    } else {
657       ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
658       UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
659       MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
660                                       ecu | MC_OKIND_HEAP );
661       if (rzB > 0)
662          MC_(make_mem_noaccess)( p + newSizeB, rzB );
663    }
664 }
665 
666 
667 /*------------------------------------------------------------*/
668 /*--- Memory pool stuff.                                   ---*/
669 /*------------------------------------------------------------*/
670 
671 /* Set to 1 for intensive sanity checking.  Is very expensive though
672    and should not be used in production scenarios.  See #255966. */
673 #define MP_DETAILED_SANITY_CHECKS 0
674 
675 static void check_mempool_sane(MC_Mempool* mp); /*forward*/
676 
677 
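/* Rough client-side usage of the pool interface below (request macros as
   declared in valgrind.h; POOL_SIZE, RZB, p and szB are placeholders):

      char* pool = malloc(POOL_SIZE);
      VALGRIND_CREATE_MEMPOOL(pool, RZB, 0);     // -> MC_(create_mempool)
      ...
      VALGRIND_MEMPOOL_ALLOC(pool, p, szB);      // -> MC_(mempool_alloc)
      VALGRIND_MEMPOOL_FREE(pool, p);            // -> MC_(mempool_free)
      ...
      VALGRIND_DESTROY_MEMPOOL(pool);            // -> MC_(destroy_mempool)
*/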
678 void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
679 {
680    MC_Mempool* mp;
681 
682    if (VG_(clo_verbosity) > 2) {
683       VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %u, %d)\n",
684                                pool, rzB, is_zeroed);
685       VG_(get_and_pp_StackTrace)
686          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
687    }
688 
689    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
690    if (mp != NULL) {
691      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
692    }
693 
694    mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
695    mp->pool       = pool;
696    mp->rzB        = rzB;
697    mp->is_zeroed  = is_zeroed;
698    mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
699    check_mempool_sane(mp);
700 
701    /* Paranoia ... ensure this area is off-limits to the client, so
702       the mp->pool field isn't visible to the leak checker.  If memory
703       management is working correctly, any pointer returned by
704       VG_(malloc) should be noaccess as far as the client is
705       concerned. */
706    if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
707       VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
708    }
709 
710    VG_(HT_add_node)( MC_(mempool_list), mp );
711 }
712 
713 void MC_(destroy_mempool)(Addr pool)
714 {
715    MC_Chunk*   mc;
716    MC_Mempool* mp;
717 
718    if (VG_(clo_verbosity) > 2) {
719       VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
720       VG_(get_and_pp_StackTrace)
721          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
722    }
723 
724    mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );
725 
726    if (mp == NULL) {
727       ThreadId tid = VG_(get_running_tid)();
728       MC_(record_illegal_mempool_error) ( tid, pool );
729       return;
730    }
731    check_mempool_sane(mp);
732 
733    // Clean up the chunks, one by one
734    VG_(HT_ResetIter)(mp->chunks);
735    while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
736       /* Note: make redzones noaccess again -- just in case user made them
737          accessible with a client request... */
738       MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
739    }
740    // Destroy the chunk table
741    VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);
742 
743    VG_(free)(mp);
744 }
745 
746 static Int
747 mp_compar(const void* n1, const void* n2)
748 {
749    const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
750    const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
751    if (mc1->data < mc2->data) return -1;
752    if (mc1->data > mc2->data) return  1;
753    return 0;
754 }
755 
756 static void
757 check_mempool_sane(MC_Mempool* mp)
758 {
759    UInt n_chunks, i, bad = 0;
760    static UInt tick = 0;
761 
762    MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
763    if (!chunks)
764       return;
765 
766    if (VG_(clo_verbosity) > 1) {
767       if (tick++ >= 10000)
768       {
769          UInt total_pools = 0, total_chunks = 0;
770          MC_Mempool* mp2;
771 
772          VG_(HT_ResetIter)(MC_(mempool_list));
773          while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
774             total_pools++;
775             VG_(HT_ResetIter)(mp2->chunks);
776             while (VG_(HT_Next)(mp2->chunks)) {
777                total_chunks++;
778             }
779          }
780 
781          VG_(message)(Vg_UserMsg,
782                       "Total mempools active: %u pools, %u chunks\n",
783                       total_pools, total_chunks);
784          tick = 0;
785       }
786    }
787 
788 
789    VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);
790 
791    /* Sanity check: the blocks should now be in order */
792    for (i = 0; i < n_chunks-1; i++) {
793       if (chunks[i]->data > chunks[i+1]->data) {
794          VG_(message)(Vg_UserMsg,
795                       "Mempool chunk %u / %u is out of order "
796                       "wrt. its successor\n",
797                       i+1, n_chunks);
798          bad = 1;
799       }
800    }
801 
802    /* Sanity check -- make sure they don't overlap */
803    for (i = 0; i < n_chunks-1; i++) {
804       if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
805          VG_(message)(Vg_UserMsg,
806                       "Mempool chunk %u / %u overlaps with its successor\n",
807                       i+1, n_chunks);
808          bad = 1;
809       }
810    }
811 
812    if (bad) {
813          VG_(message)(Vg_UserMsg,
814                 "Bad mempool (%u chunks), dumping chunks for inspection:\n",
815                 n_chunks);
816          for (i = 0; i < n_chunks; ++i) {
817             VG_(message)(Vg_UserMsg,
818                          "Mempool chunk %u / %u: %lu bytes "
819                          "[%lx,%lx), allocated:\n",
820                          i+1,
821                          n_chunks,
822                          chunks[i]->szB + 0UL,
823                          chunks[i]->data,
824                          chunks[i]->data + chunks[i]->szB);
825 
826             VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
827          }
828    }
829    VG_(free)(chunks);
830 }
831 
832 void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
833 {
834    MC_Mempool* mp;
835 
836    if (VG_(clo_verbosity) > 2) {
837       VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
838                                pool, addr, szB);
839       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
840    }
841 
842    mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
843    if (mp == NULL) {
844       MC_(record_illegal_mempool_error) ( tid, pool );
845    } else {
846       if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
847       MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
848                      MC_AllocCustom, mp->chunks);
849       if (mp->rzB > 0) {
850          // This is not needed if the user application has properly
851          // marked the superblock noaccess when defining the mempool.
852          // We nevertheless mark the redzones noaccess, to catch some
853          // bugs if the user forgot.
854          MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
855          MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
856       }
857       if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
858    }
859 }
860 
861 void MC_(mempool_free)(Addr pool, Addr addr)
862 {
863    MC_Mempool*  mp;
864    MC_Chunk*    mc;
865    ThreadId     tid = VG_(get_running_tid)();
866 
867    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
868    if (mp == NULL) {
869       MC_(record_illegal_mempool_error)(tid, pool);
870       return;
871    }
872 
873    if (VG_(clo_verbosity) > 2) {
874       VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
875       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
876    }
877 
878    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
879    mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
880    if (mc == NULL) {
881       MC_(record_free_error)(tid, (Addr)addr);
882       return;
883    }
884 
885    if (VG_(clo_verbosity) > 2) {
886       VG_(message)(Vg_UserMsg,
887                    "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
888                    pool, addr, mc->szB + 0UL);
889    }
890 
891    die_and_free_mem ( tid, mc, mp->rzB );
892    if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
893 }
894 
895 
896 void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
897 {
898    MC_Mempool*  mp;
899    MC_Chunk*    mc;
900    ThreadId     tid = VG_(get_running_tid)();
901    UInt         n_shadows, i;
902    VgHashNode** chunks;
903 
904    if (VG_(clo_verbosity) > 2) {
905       VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
906                                pool, addr, szB);
907       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
908    }
909 
910    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
911    if (mp == NULL) {
912       MC_(record_illegal_mempool_error)(tid, pool);
913       return;
914    }
915 
916    check_mempool_sane(mp);
917    chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
918    if (n_shadows == 0) {
919      tl_assert(chunks == NULL);
920      return;
921    }
922 
923    tl_assert(chunks != NULL);
924    for (i = 0; i < n_shadows; ++i) {
925 
926       Addr lo, hi, min, max;
927 
928       mc = (MC_Chunk*) chunks[i];
929 
930       lo = mc->data;
931       hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
932 
933 #define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
934 
935       if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
936 
937          /* The current chunk is entirely within the trim extent: keep
938             it. */
939 
940          continue;
941 
942       } else if ( (! EXTENT_CONTAINS(lo)) &&
943                   (! EXTENT_CONTAINS(hi)) ) {
944 
945          /* The current chunk is entirely outside the trim extent:
946             delete it. */
947 
948          if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
949             MC_(record_free_error)(tid, (Addr)mc->data);
950             VG_(free)(chunks);
951             if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
952             return;
953          }
954          die_and_free_mem ( tid, mc, mp->rzB );
955 
956       } else {
957 
958          /* The current chunk intersects the trim extent: remove,
959             trim, and reinsert it. */
960 
961          tl_assert(EXTENT_CONTAINS(lo) ||
962                    EXTENT_CONTAINS(hi));
963          if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
964             MC_(record_free_error)(tid, (Addr)mc->data);
965             VG_(free)(chunks);
966             if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
967             return;
968          }
969 
970          if (mc->data < addr) {
971            min = mc->data;
972            lo = addr;
973          } else {
974            min = addr;
975            lo = mc->data;
976          }
977 
978          if (mc->data + mc->szB > addr + szB) {
979            max = mc->data + mc->szB;
980            hi = addr + szB;
981          } else {
982            max = addr + szB;
983            hi = mc->data + mc->szB;
984          }
985 
986          tl_assert(min <= lo);
987          tl_assert(lo < hi);
988          tl_assert(hi <= max);
989 
990          if (min < lo && !EXTENT_CONTAINS(min)) {
991            MC_(make_mem_noaccess)( min, lo - min);
992          }
993 
994          if (hi < max && !EXTENT_CONTAINS(max)) {
995            MC_(make_mem_noaccess)( hi, max - hi );
996          }
997 
998          mc->data = lo;
999          mc->szB = (UInt) (hi - lo);
1000          VG_(HT_add_node)( mp->chunks, mc );
1001       }
1002 
1003 #undef EXTENT_CONTAINS
1004 
1005    }
1006    check_mempool_sane(mp);
1007    VG_(free)(chunks);
1008 }
1009 
1010 void MC_(move_mempool)(Addr poolA, Addr poolB)
1011 {
1012    MC_Mempool* mp;
1013 
1014    if (VG_(clo_verbosity) > 2) {
1015       VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
1016       VG_(get_and_pp_StackTrace)
1017          (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1018    }
1019 
1020    mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );
1021 
1022    if (mp == NULL) {
1023       ThreadId tid = VG_(get_running_tid)();
1024       MC_(record_illegal_mempool_error) ( tid, poolA );
1025       return;
1026    }
1027 
1028    mp->pool = poolB;
1029    VG_(HT_add_node)( MC_(mempool_list), mp );
1030 }
1031 
1032 void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
1033 {
1034    MC_Mempool*  mp;
1035    MC_Chunk*    mc;
1036    ThreadId     tid = VG_(get_running_tid)();
1037 
1038    if (VG_(clo_verbosity) > 2) {
1039       VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
1040                    pool, addrA, addrB, szB);
1041       VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1042    }
1043 
1044    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1045    if (mp == NULL) {
1046       MC_(record_illegal_mempool_error)(tid, pool);
1047       return;
1048    }
1049 
1050    check_mempool_sane(mp);
1051 
1052    mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
1053    if (mc == NULL) {
1054       MC_(record_free_error)(tid, (Addr)addrA);
1055       return;
1056    }
1057 
1058    mc->data = addrB;
1059    mc->szB  = szB;
1060    VG_(HT_add_node)( mp->chunks, mc );
1061 
1062    check_mempool_sane(mp);
1063 }
1064 
1065 Bool MC_(mempool_exists)(Addr pool)
1066 {
1067    MC_Mempool*  mp;
1068 
1069    mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1070    if (mp == NULL) {
1071        return False;
1072    }
1073    return True;
1074 }
1075 
1076 
1077 /*------------------------------------------------------------*/
1078 /*--- Statistics printing                                  ---*/
1079 /*------------------------------------------------------------*/
1080 
1081 void MC_(print_malloc_stats) ( void )
1082 {
1083    MC_Chunk* mc;
1084    SizeT     nblocks = 0;
1085    ULong     nbytes  = 0;
1086 
1087    if (VG_(clo_verbosity) == 0)
1088       return;
1089    if (VG_(clo_xml))
1090       return;
1091 
1092    /* Count memory still in use. */
1093    VG_(HT_ResetIter)(MC_(malloc_list));
1094    while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1095       nblocks++;
1096       nbytes += (ULong)mc->szB;
1097    }
1098 
1099    VG_(umsg)(
1100       "HEAP SUMMARY:\n"
1101       "    in use at exit: %'llu bytes in %'lu blocks\n"
1102       "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
1103       "\n",
1104       nbytes, nblocks,
1105       cmalloc_n_mallocs,
1106       cmalloc_n_frees, cmalloc_bs_mallocd
1107    );
1108 }
1109 
1110 SizeT MC_(get_cmalloc_n_frees) ( void )
1111 {
1112    return cmalloc_n_frees;
1113 }
1114 
1115 
1116 /*--------------------------------------------------------------------*/
1117 /*--- end                                                          ---*/
1118 /*--------------------------------------------------------------------*/
1119