/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks that have not yet been physically freed: the "big
   blocks" freed list and the "small blocks" freed list.  Blocks
   whose size is >= MC_(clo_freelist_big_blocks) are linked into
   the big blocks freed list.  This allows a client to allocate
   and free big blocks (e.g. bigger than VG_(clo_freelist_vol))
   without immediately losing all protection against dangling
   pointers.  Position [0] is for big blocks, [1] is for small
   blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      released directly anyway: in that case, we put it at the head
      of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l]    = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l]       = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   The big blocks list is drained first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

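/* Return the shadow chunk on the freed lists whose block (including
   redzones) contains address 'a', or NULL if there is none.  Both the
   big blocks and small blocks freed lists are searched. */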
MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk*  mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

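/* The functions below are Memcheck's replacements for the client's
   allocation functions (malloc, calloc, memalign, realloc, free and the
   C++ new/delete variants).  They are registered with the core in
   mc_main.c via VG_(needs_malloc_replacement); MC_(new_block) and
   MC_(handle_free) are also reused by the MALLOCLIKE/FREELIKE_BLOCK
   client requests. */
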
// XXX: should make this a proper error (bug #79311).
static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
   if ((SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
                      (SSizeT)sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "Warning: silly args (%ld,%ld) to calloc()\n",
                      (SSizeT)n, (SSizeT)sizeB);
      return True;
   }
   return False;
}

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free-fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case the user made
      them accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait until the next block allocation to release blocks.
      This increases the chance of discovering dangling-pointer usage,
      even for big blocks being freed by the client. */
}

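/* Handle a client deallocation of the block at 'p' with redzone size
   'rzB': remove its shadow chunk from MC_(malloc_list), report an
   invalid or mismatched free if appropriate, and put the block on the
   freed queue. */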
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         MC_(record_freemismatch_error) ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}

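/* Handle realloc() from the client.  A new block is always allocated
   and the old block is retired via the freed queue (even when
   shrinking), so that later accesses through the old pointer can still
   be detected. */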
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* Check whether it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This guarantees that accesses via the old address
      after reallocation are caught, regardless of the change in size.
      (Of course the ability to detect accesses to the old block also
      depends on the size of the freed blocks queue.) */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                                 MC_(Malloc_Redzone_SzB) );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt        ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                                 MC_(Malloc_Redzone_SzB) );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                                        ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB) );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                                                new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}


/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

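/* The functions below back the mempool client requests declared in
   valgrind.h (VALGRIND_CREATE_MEMPOOL, VALGRIND_MEMPOOL_ALLOC,
   VALGRIND_MEMPOOL_FREE, VALGRIND_MEMPOOL_TRIM, VALGRIND_MOVE_MEMPOOL,
   VALGRIND_MEMPOOL_CHANGE and VALGRIND_MEMPOOL_EXISTS), with which
   custom allocators describe their pools to Memcheck. */
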
/* Set to 1 for intensive sanity checking.  It is very expensive, though,
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/


void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the pool's metadata isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

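/* Comparison function for VG_(ssort): order two chunk pointers by the
   start address of the chunks; used by check_mempool_sane() to sort a
   pool's chunks. */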
static Int
mp_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

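/* Sanity-check a mempool: sort its chunks by address and complain
   (without asserting) if any chunk is out of order or overlaps its
   successor.  At higher verbosity, periodically report the total number
   of active pools and chunks. */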
static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check: make sure the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(chunks[i]->where);
      }
   }
   VG_(free)(chunks);
}

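/* Record an allocation of szB bytes at 'addr' from the pool anchored at
   'pool': a custom (MC_AllocCustom) chunk is added to the pool's chunk
   table, and the pool's redzones around the block are marked noaccess. */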
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We nevertheless mark the redzones noaccess, to catch some
         // bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}


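/* Trim the pool so that only chunks lying inside [addr, addr+szB)
   remain registered: chunks entirely outside the range are freed,
   chunks straddling a boundary are clipped to the range, and chunks
   entirely inside are kept. */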
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}

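/* Re-key the mempool currently anchored at poolA so that it is anchored
   at poolB instead; the pool's chunks themselves are left untouched. */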
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

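/* Record that the pool block previously registered at addrA now lives
   at addrB with size szB.  Only the shadow chunk's metadata is updated;
   no addressability or definedness state is changed here. */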
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

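/* Return True iff a mempool anchored at 'pool' is currently registered. */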
Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool*  mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/