/*--------------------------------------------------------------------*/
/*--- A pool (memory) allocator that avoids duplicated copies.    ---*/
/*---                                           m_deduppoolalloc.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2014-2017 Philippe Waroquiers philippe.waroquiers@skynet.be

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcassert.h"
#include "pub_core_xarray.h"
#include "pub_core_deduppoolalloc.h" /* self */
#include "pub_core_hashtable.h"
#include "pub_core_poolalloc.h"
#include "pub_core_options.h"
#include "pub_core_mallocfree.h"
#include "pub_core_debuglog.h"

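/* Illustrative usage sketch (a comment only, not compiled here; it assumes
   VG_(malloc)/VG_(free) as the underlying allocator and a hypothetical
   cost-centre string):

      DedupPoolAlloc *ddpa
         = VG_(newDedupPA) (4000, sizeof(UWord), VG_(malloc),
                            "perf.example.1", VG_(free));
      const HChar *s1 = VG_(allocEltDedupPA) (ddpa, 6, "hello");
      const HChar *s2 = VG_(allocEltDedupPA) (ddpa, 6, "hello");
      // s1 == s2 : identical contents are stored only once.
      VG_(freezeDedupPA) (ddpa, NULL); // no more insertions allowed
      ...
      VG_(deleteDedupPA) (ddpa);       // releases the pools themselves
*/
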
struct _DedupPoolAlloc {
   SizeT  poolSzB;  /* Minimum size of a pool. */
   SizeT  fixedSzb; /* If using VG_(allocFixedEltDedupPA), size of elements */
   Bool   strPA;    /* True if this is a string dedup pool */
   SizeT  eltAlign;
   Alloc_Fn_t alloc_fn; /* pool allocator */
   const HChar* cc;     /* pool allocator's cost centre */
   Free_Fn_t free_fn;   /* pool allocator's deallocation function */
   /* XArray of void* (pointers to pools).  The pools themselves.
      Each element is a pointer to a block of size at least poolSzB bytes.
      The last block might be smaller due to a call to shrink_block. */
   XArray *pools;

   /* Hash table of pool elements, used to dedup.
      If NULL, it means the DedupPoolAlloc is frozen. */
   VgHashTable *ht_elements;

   /* Hash table nodes of pool elements are allocated with a pool, to
      decrease memory overhead during insertion in the DedupPoolAlloc. */
   PoolAlloc *ht_node_pa;

   UChar *curpool;       /* last allocated pool. */
   UChar *curpool_free;  /* Pos in current pool to allocate next elt.
                            Always aligned on eltAlign. */
   UChar *curpool_limit; /* Last pos in current pool. */
   /* Note that for a fixed size pool, we only have a single pool to allow
      simple/fast indexing. This single pool is grown, which might change
      the address of the already allocated elements. */

   /* Total nr of alloc calls, resulting in (we hope) a lot less
      real (dedup) elements. */
   ULong nr_alloc_calls;
};
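
/* Relationship between the curpool* fields: curpool points at the start of
   the most recently allocated block, curpool_limit at its last byte, and
   curpool_free at the (eltAlign-aligned) position where the next element
   will be carved out.  Elements are taken from curpool_free upwards; when
   the next element would pass curpool_limit, a new pool is allocated (or,
   for a fixed-size ddpa, the single pool is grown). */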

typedef
   struct _ht_node {
      struct _ht_node *next; // Read/Write by hashtable (pub_tool_hashtable.h)
      UWord   key;           // Read by hashtable (pub_tool_hashtable.h)
      SizeT   eltSzBorStrNr; // for a normal pool, elt size,
                             // for a string pool, the unique str number
      const void *elt;
   }
   ht_node;
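
/* The key of an ht_node is the sdbm_hash of the element contents (see
   allocEltDedupPA below); key collisions are resolved by the generic hash
   table lookup using cmp_pool_elt or cmp_pool_str. */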

DedupPoolAlloc* VG_(newDedupPA) ( SizeT  poolSzB,
                                  SizeT  eltAlign,
                                  Alloc_Fn_t alloc_fn,
                                  const HChar* cc,
                                  Free_Fn_t free_fn )
{
   DedupPoolAlloc* ddpa;
   vg_assert(poolSzB >= eltAlign);
   vg_assert(poolSzB >= 100); /* let's say */
   vg_assert(poolSzB >= 10*eltAlign); /* let's say */
   vg_assert(alloc_fn);
   vg_assert(cc);
   vg_assert(free_fn);
   ddpa = alloc_fn(cc, sizeof(*ddpa));
   VG_(memset)(ddpa, 0, sizeof(*ddpa));
   ddpa->poolSzB  = poolSzB;
   ddpa->fixedSzb = 0;
   ddpa->strPA    = False;
   ddpa->eltAlign = eltAlign;
   ddpa->alloc_fn = alloc_fn;
   ddpa->cc       = cc;
   ddpa->free_fn  = free_fn;
   ddpa->pools    = VG_(newXA)( alloc_fn, cc, free_fn, sizeof(void*) );

   ddpa->ht_elements = VG_(HT_construct) (cc);
   ddpa->ht_node_pa  = VG_(newPA) ( sizeof(ht_node),
                                    1000,
                                    alloc_fn,
                                    cc,
                                    free_fn);
   ddpa->curpool       = NULL;
   ddpa->curpool_limit = NULL;
   ddpa->curpool_free  = NULL;

   return ddpa;
}

void VG_(deleteDedupPA) ( DedupPoolAlloc* ddpa)
{
   Word i;
   if (ddpa->ht_elements)
      // Free data structures used for insertion.
      VG_(freezeDedupPA) (ddpa, NULL);
   for (i = 0; i < VG_(sizeXA) (ddpa->pools); i++)
      ddpa->free_fn (*(UWord **)VG_(indexXA) ( ddpa->pools, i ));
   VG_(deleteXA) (ddpa->pools);
   ddpa->free_fn (ddpa);
}

static __inline__
UChar* ddpa_align ( DedupPoolAlloc* ddpa, UChar *c )
{
   return (UChar*)VG_ROUNDUP(c, ddpa->eltAlign);
}
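
/* For example, with eltAlign == 8, ddpa_align maps 0x1003 to 0x1008 and
   leaves 0x1008 unchanged: VG_ROUNDUP rounds up to the next multiple of
   the alignment. */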

/* Allocate a new pool or grow the (only) pool for a fixed size ddpa. */
__attribute__((noinline))
static void ddpa_add_new_pool_or_grow ( DedupPoolAlloc* ddpa )
{
   vg_assert(ddpa);

   if (ddpa->fixedSzb > 0 && ddpa->curpool != NULL) {
      // Grow (* 2) the current (fixed elt) pool
      UChar *curpool_align = ddpa_align(ddpa, ddpa->curpool);
      SizeT curpool_used = ddpa->curpool_free - curpool_align;
      SizeT curpool_size = ddpa->curpool_limit - ddpa->curpool + 1;
      UChar *newpool = ddpa->alloc_fn (ddpa->cc, 2 * curpool_size);
      UChar *newpool_free = ddpa_align (ddpa, newpool);
      UChar *newpool_limit = newpool + 2 * curpool_size - 1;
      Word reloc_offset = (Addr)newpool_free - (Addr)curpool_align;
      ht_node *n;

      VG_(memcpy) (newpool_free, curpool_align, curpool_used);
      /* We have reallocated the (only) pool. We need to relocate the
         pointers in the hash table nodes. */
      VG_(HT_ResetIter) (ddpa->ht_elements);
      while ((n = VG_(HT_Next) (ddpa->ht_elements))) {
         n->elt = (void*)((Addr)n->elt + reloc_offset);
      }
      newpool_free += curpool_used;

      VG_(dropHeadXA) (ddpa->pools, 1);
      ddpa->free_fn (ddpa->curpool);
      ddpa->curpool = newpool;
      ddpa->curpool_free = newpool_free;
      ddpa->curpool_limit = newpool_limit;
      VG_(addToXA)( ddpa->pools, &ddpa->curpool);
   } else {
      /* Allocate a new pool, or allocate the first/only pool for a
         fixed size ddpa. */
      ddpa->curpool = ddpa->alloc_fn( ddpa->cc, ddpa->poolSzB);
      ddpa->curpool_limit = ddpa->curpool + ddpa->poolSzB - 1;
      ddpa->curpool_free = ddpa_align (ddpa, ddpa->curpool);
      /* add to our collection of pools */
      VG_(addToXA)( ddpa->pools, &ddpa->curpool );
   }
}
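
/* Note that growing the single pool of a fixed-size ddpa moves the already
   allocated elements.  This is why fixed-size elements are handed out to
   clients as element numbers (VG_(allocFixedEltDedupPA)/VG_(indexEltNumber))
   rather than as stable pointers. */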

/* Compare function for 'gen' hash table. No need to compare the key
   in this function, as the hash table already does it for us, and in
   any case, if the data is equal, the keys must also be equal. */
static Word cmp_pool_elt (const void* node1, const void* node2 )
{
   const ht_node* hnode1 = node1;
   const ht_node* hnode2 = node2;

   /* As this function is called by the hashtable, which has already
      checked for key equality, it is likely that this is the 'good'
      element. So, we handle the equal case first. */
   if (hnode1->eltSzBorStrNr == hnode2->eltSzBorStrNr)
      return VG_(memcmp) (hnode1->elt, hnode2->elt, hnode1->eltSzBorStrNr);
   else if (hnode1->eltSzBorStrNr < hnode2->eltSzBorStrNr)
      return -1;
   else
      return 1;
}

/* String compare function for 'gen' hash table.
   Similarly to cmp_pool_elt, no need to compare the key. */
static Word cmp_pool_str (const void* node1, const void* node2 )
{
   const ht_node* hnode1 = node1;
   const ht_node* hnode2 = node2;

   return VG_(strcmp)(hnode1->elt, hnode2->elt);
}

/* Print some stats. */
static void print_stats (DedupPoolAlloc *ddpa)
{
   VG_(message)(Vg_DebugMsg,
                "dedupPA:%s %ld allocs (%u uniq)"
                " %ld pools (%ld bytes free in last pool)\n",
                ddpa->cc,
                (long int) ddpa->nr_alloc_calls,
                VG_(HT_count_nodes)(ddpa->ht_elements),
                VG_(sizeXA)(ddpa->pools),
                ddpa->curpool ?
                (long int) (ddpa->curpool_limit - ddpa->curpool_free + 1) : 0);
   if (ddpa->strPA)
      VG_(HT_print_stats) (ddpa->ht_elements, cmp_pool_str);
   else
      VG_(HT_print_stats) (ddpa->ht_elements, cmp_pool_elt);
}

/* Dummy free, as the ht elements are allocated in a pool, and
   we will destroy the pool in one single operation. */
static void htelem_dummyfree(void* ht_elem)
{
}

void VG_(freezeDedupPA) (DedupPoolAlloc *ddpa,
                         void (*shrink_block)(void*, SizeT))
{
   if (VG_(clo_stats)
       && (VG_(clo_verbosity) > 2 || VG_(debugLog_getLevel) () >= 2)) {
      print_stats(ddpa);
   }
   vg_assert (!ddpa->fixedSzb || VG_(sizeXA) (ddpa->pools) == 1);
   if (shrink_block && ddpa->curpool_limit > ddpa->curpool_free)
      (*shrink_block)(ddpa->curpool, ddpa->curpool_free - ddpa->curpool);
   VG_(HT_destruct) ( ddpa->ht_elements, htelem_dummyfree);
   ddpa->ht_elements = NULL;
   VG_(deletePA) (ddpa->ht_node_pa);
   ddpa->ht_node_pa = NULL;
}
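
/* After freezing, ht_elements is NULL, so any further call to
   allocEltDedupPA will fail its vg_assert(ddpa->ht_elements).  Freezing
   only releases the deduplication state; the pools (and thus the elements)
   stay alive until VG_(deleteDedupPA). */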


// hash function used by gawk and SDBM.
static UInt sdbm_hash (const UChar* buf, UInt len )
{
   UInt h;
   UInt i;

   h = 0;
   for (i = 0; i < len; i++)
      h = *buf++ + (h<<6) + (h<<16) - h;
   return h;
}
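
/* The update step is equivalent to h = *buf++ + h * 65599, since
   (h<<6) + (h<<16) - h == h * (64 + 65536 - 1) == h * 65599. */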

static ht_node* allocEltDedupPA (DedupPoolAlloc *ddpa, SizeT eltSzB,
                                 const void *elt)
{
   ht_node ht_elt;
   void* elt_ins;
   ht_node *ht_ins;
   vg_assert(ddpa);
   vg_assert(ddpa->ht_elements);

   ddpa->nr_alloc_calls++;

   ht_elt.key = sdbm_hash (elt, eltSzB);

   ht_elt.elt = elt;

   if (ddpa->strPA)
      ht_ins = VG_(HT_gen_lookup) (ddpa->ht_elements, &ht_elt, cmp_pool_str);
   else {
      ht_elt.eltSzBorStrNr = eltSzB;
      ht_ins = VG_(HT_gen_lookup) (ddpa->ht_elements, &ht_elt, cmp_pool_elt);
   }
   if (ht_ins)
      return ht_ins;

   /* Not found -> we need to allocate a new element from the pool
      and insert it in the hash table of inserted elements. */

   // Add a new pool or grow pool if not enough space in the current pool
   if (eltSzB + ddpa->eltAlign > ddpa->poolSzB) {
      // Element (+eltAlign for worst case) bigger than the pool size
      // => allocate a specific pool just for this element
      UChar *newpool = ddpa->alloc_fn (ddpa->cc, eltSzB + ddpa->eltAlign);
      /* add to our collection of pools */
      VG_(addToXA)( ddpa->pools, &newpool );
      elt_ins = ddpa_align (ddpa, newpool);
   } else {
      if (UNLIKELY(ddpa->curpool_free == NULL
                   || ddpa->curpool_free + eltSzB - 1 > ddpa->curpool_limit)) {
         ddpa_add_new_pool_or_grow (ddpa);
      }
      elt_ins = ddpa->curpool_free;
      ddpa->curpool_free = ddpa_align(ddpa, ddpa->curpool_free + eltSzB);
   }

   VG_(memcpy)(elt_ins, elt, eltSzB);
   ht_ins = VG_(allocEltPA) (ddpa->ht_node_pa);
   ht_ins->key = ht_elt.key;
   if (ddpa->strPA)
      ht_ins->eltSzBorStrNr = VG_(HT_count_nodes)(ddpa->ht_elements) + 1;
   else
      ht_ins->eltSzBorStrNr = eltSzB;
   ht_ins->elt = elt_ins;
   VG_(HT_add_node)(ddpa->ht_elements, ht_ins);
   return ht_ins;
}

const void* VG_(allocEltDedupPA) (DedupPoolAlloc *ddpa, SizeT eltSzB,
                                  const void *elt)
{
   return allocEltDedupPA(ddpa, eltSzB, elt)->elt;
}

UInt VG_(allocStrDedupPA) (DedupPoolAlloc *ddpa,
                           const HChar* str,
                           Bool* newStr)
{
   if (!ddpa->strPA) {
      // First insertion in this ddpa
      vg_assert (ddpa->nr_alloc_calls == 0);
      vg_assert (ddpa->fixedSzb == 0);
      ddpa->strPA = True;
   }

   const UInt nr_str = VG_(HT_count_nodes)(ddpa->ht_elements);
   const ht_node* ht_ins = allocEltDedupPA(ddpa, VG_(strlen)(str)+1, str);

   *newStr = nr_str < VG_(HT_count_nodes)(ddpa->ht_elements);
   return ht_ins->eltSzBorStrNr;
}
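
/* The returned value is the string's unique number: 1 for the first
   distinct string inserted, 2 for the second, and so on.  Inserting a
   string that is already present returns its existing number and sets
   *newStr to False.  A hypothetical caller could use it like this:

      Bool new;
      UInt nr = VG_(allocStrDedupPA) (ddpa, "main", &new);
      // nr identifies "main" for the lifetime of the ddpa.
*/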

static __inline__
UInt elt2nr (DedupPoolAlloc *ddpa, const void *dedup_elt)
{
   vg_assert (dedup_elt >= (const void *)ddpa->curpool
              && dedup_elt < (const void *)ddpa->curpool_free);
   return 1 + ((const UChar*)dedup_elt - (const UChar *)ddpa->curpool)
      / VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);
}
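
/* Element numbers are 1-based and assume a fixed stride of
   VG_ROUNDUP(fixedSzb, eltAlign) bytes from the start of the single pool.
   For example, with fixedSzb == 12 and eltAlign == 8 the stride is 16, so
   the element stored at offset 32 from curpool gets number 1 + 32/16 == 3. */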

UInt VG_(allocFixedEltDedupPA) (DedupPoolAlloc *ddpa,
                                SizeT eltSzB, const void *elt)
{
   if (ddpa->fixedSzb == 0) {
      // First insertion in this ddpa
      vg_assert (!ddpa->strPA);
      vg_assert (ddpa->nr_alloc_calls == 0);
      vg_assert (eltSzB > 0);
      ddpa->fixedSzb = eltSzB;
   }
   vg_assert (ddpa->fixedSzb == eltSzB);
   const void *dedup_elt = VG_(allocEltDedupPA) (ddpa, eltSzB, elt);
   return elt2nr (ddpa, dedup_elt);
}
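
/* Illustrative round trip (a sketch, assuming a hypothetical fixed-size
   element type Ex used with this ddpa):

      Ex ex = ...;
      UInt nr = VG_(allocFixedEltDedupPA) (ddpa, sizeof(Ex), &ex);
      Ex *p  = VG_(indexEltNumber) (ddpa, nr);
      // *p compares equal to ex.  Note that p can be invalidated by a
      // later insertion (the single pool may be grown and thus moved),
      // whereas nr stays valid.
*/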

void* VG_(indexEltNumber) (DedupPoolAlloc *ddpa,
                           UInt eltNr)
{
   void *dedup_elt;

   dedup_elt = ddpa->curpool
      + (eltNr - 1) * VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);

   vg_assert ((UChar*)dedup_elt >= ddpa->curpool
              && (UChar*)dedup_elt < ddpa->curpool_free);

   return dedup_elt;
}

UInt VG_(sizeDedupPA) (DedupPoolAlloc *ddpa)
{
   if (ddpa->curpool == NULL)
      return 0;

   vg_assert (ddpa->fixedSzb);
   return (ddpa->curpool_free - ddpa_align(ddpa, ddpa->curpool))
      / VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);
}