1 //////////////////////////////////////////////////////////////////////////////
2 //
3 // (C) Copyright Ion Gaztanaga 2007-2015. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // See http://www.boost.org/libs/container for documentation.
8 //
9 //////////////////////////////////////////////////////////////////////////////
10 
11 #include <boost/container/detail/alloc_lib.h>
12 
13 #include "errno.h"   //dlmalloc bug: EINVAL is used in posix_memalign without checking LACKS_ERRNO_H
14 #include "limits.h"  //CHAR_BIT
15 #ifdef BOOST_CONTAINER_DLMALLOC_FOOTERS
16 #define FOOTERS      1
17 #endif
18 #define USE_LOCKS    1
19 #define MSPACES      1
20 #define NO_MALLINFO  1
21 #define NO_MALLOC_STATS 1
22 //disable sbrk as it's deprecated on some systems and weakens ASLR
23 #define HAVE_MORECORE 0
24 
25 
26 #if !defined(NDEBUG)
27    #if !defined(DEBUG)
28       #define DEBUG 1
29       #define DL_DEBUG_DEFINED
30    #endif
31 #endif
32 
33 #define USE_DL_PREFIX
34 
35 #ifdef __GNUC__
36 #define FORCEINLINE inline
37 #endif
38 #include "dlmalloc_2_8_6.c"
39 
40 #ifdef _MSC_VER
41 #pragma warning (push)
42 #pragma warning (disable : 4127)
43 #pragma warning (disable : 4267)
45 #pragma warning (disable : 4702)
46 #pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
47 #pragma warning (disable : 4251 4231 4660) /*dll warnings*/
48 #endif
49 
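/* DL_SIZE_IMPL(p) returns the usable payload size of an allocation 'p': the
   size of its underlying chunk minus that chunk's bookkeeping overhead.
   boost_cont_size() further below is a thin wrapper over it. */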
50 #define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))
51 
52 static size_t s_allocated_memory;
53 
54 ///////////////////////////////////////////////////////////////
55 ///////////////////////////////////////////////////////////////
56 ///////////////////////////////////////////////////////////////
57 //
58 //         SLIGHTLY MODIFIED DLMALLOC FUNCTIONS
59 //
60 ///////////////////////////////////////////////////////////////
61 ///////////////////////////////////////////////////////////////
62 ///////////////////////////////////////////////////////////////
63 
64 //This function is equal to mspace_free, but with PREACTION replaced by 0
65 //and POSTACTION removed (the caller must already hold the mspace lock)
66 static void mspace_free_lockless(mspace msp, void* mem)
67 {
68   if (mem != 0) {
69     mchunkptr p  = mem2chunk(mem);
70 #if FOOTERS
71     mstate fm = get_mstate_for(p);
72     msp = msp; /* placate people compiling -Wunused */
73 #else /* FOOTERS */
74     mstate fm = (mstate)msp;
75 #endif /* FOOTERS */
76     if (!ok_magic(fm)) {
77       USAGE_ERROR_ACTION(fm, p);
78       return;
79     }
80     if (!0){//PREACTION(fm)) {
81       check_inuse_chunk(fm, p);
82       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
83         size_t psize = chunksize(p);
84         mchunkptr next = chunk_plus_offset(p, psize);
85         if (!pinuse(p)) {
86           size_t prevsize = p->prev_foot;
87           if (is_mmapped(p)) {
88             psize += prevsize + MMAP_FOOT_PAD;
89             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
90               fm->footprint -= psize;
91             goto postaction;
92           }
93           else {
94             mchunkptr prev = chunk_minus_offset(p, prevsize);
95             psize += prevsize;
96             p = prev;
97             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
98               if (p != fm->dv) {
99                 unlink_chunk(fm, p, prevsize);
100               }
101               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
102                 fm->dvsize = psize;
103                 set_free_with_pinuse(p, psize, next);
104                 goto postaction;
105               }
106             }
107             else
108               goto erroraction;
109           }
110         }
111 
112         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
113           if (!cinuse(next)) {  /* consolidate forward */
114             if (next == fm->top) {
115               size_t tsize = fm->topsize += psize;
116               fm->top = p;
117               p->head = tsize | PINUSE_BIT;
118               if (p == fm->dv) {
119                 fm->dv = 0;
120                 fm->dvsize = 0;
121               }
122               if (should_trim(fm, tsize))
123                 sys_trim(fm, 0);
124               goto postaction;
125             }
126             else if (next == fm->dv) {
127               size_t dsize = fm->dvsize += psize;
128               fm->dv = p;
129               set_size_and_pinuse_of_free_chunk(p, dsize);
130               goto postaction;
131             }
132             else {
133               size_t nsize = chunksize(next);
134               psize += nsize;
135               unlink_chunk(fm, next, nsize);
136               set_size_and_pinuse_of_free_chunk(p, psize);
137               if (p == fm->dv) {
138                 fm->dvsize = psize;
139                 goto postaction;
140               }
141             }
142           }
143           else
144             set_free_with_pinuse(p, psize, next);
145 
146           if (is_small(psize)) {
147             insert_small_chunk(fm, p, psize);
148             check_free_chunk(fm, p);
149           }
150           else {
151             tchunkptr tp = (tchunkptr)p;
152             insert_large_chunk(fm, tp, psize);
153             check_free_chunk(fm, p);
154             if (--fm->release_checks == 0)
155               release_unused_segments(fm);
156           }
157           goto postaction;
158         }
159       }
160     erroraction:
161       USAGE_ERROR_ACTION(fm, p);
162     postaction:
163       ;//POSTACTION(fm);
164     }
165   }
166 }
167 
168 //This function is equal to mspace_malloc, but with PREACTION replaced by 0
169 //and POSTACTION removed (the caller must already hold the mspace lock)
170 void* mspace_malloc_lockless(mspace msp, size_t bytes)
171 {
172   mstate ms = (mstate)msp;
173   if (!ok_magic(ms)) {
174     USAGE_ERROR_ACTION(ms,ms);
175     return 0;
176   }
177     if (!0){//PREACTION(ms)) {
178     void* mem;
179     size_t nb;
180     if (bytes <= MAX_SMALL_REQUEST) {
181       bindex_t idx;
182       binmap_t smallbits;
183       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
184       idx = small_index(nb);
185       smallbits = ms->smallmap >> idx;
186 
187       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
188         mchunkptr b, p;
189         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
190         b = smallbin_at(ms, idx);
191         p = b->fd;
192         assert(chunksize(p) == small_index2size(idx));
193         unlink_first_small_chunk(ms, b, p, idx);
194         set_inuse_and_pinuse(ms, p, small_index2size(idx));
195         mem = chunk2mem(p);
196         check_malloced_chunk(ms, mem, nb);
197         goto postaction;
198       }
199 
200       else if (nb > ms->dvsize) {
201         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
202           mchunkptr b, p, r;
203           size_t rsize;
204           bindex_t i;
205           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
206           binmap_t leastbit = least_bit(leftbits);
207           compute_bit2idx(leastbit, i);
208           b = smallbin_at(ms, i);
209           p = b->fd;
210           assert(chunksize(p) == small_index2size(i));
211           unlink_first_small_chunk(ms, b, p, i);
212           rsize = small_index2size(i) - nb;
213           /* Fit here cannot be remainderless if 4byte sizes */
214           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
215             set_inuse_and_pinuse(ms, p, small_index2size(i));
216           else {
217             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
218             r = chunk_plus_offset(p, nb);
219             set_size_and_pinuse_of_free_chunk(r, rsize);
220             replace_dv(ms, r, rsize);
221           }
222           mem = chunk2mem(p);
223           check_malloced_chunk(ms, mem, nb);
224           goto postaction;
225         }
226 
227         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
228           check_malloced_chunk(ms, mem, nb);
229           goto postaction;
230         }
231       }
232     }
233     else if (bytes >= MAX_REQUEST)
234       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
235     else {
236       nb = pad_request(bytes);
237       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
238         check_malloced_chunk(ms, mem, nb);
239         goto postaction;
240       }
241     }
242 
243     if (nb <= ms->dvsize) {
244       size_t rsize = ms->dvsize - nb;
245       mchunkptr p = ms->dv;
246       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
247         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
248         ms->dvsize = rsize;
249         set_size_and_pinuse_of_free_chunk(r, rsize);
250         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
251       }
252       else { /* exhaust dv */
253         size_t dvs = ms->dvsize;
254         ms->dvsize = 0;
255         ms->dv = 0;
256         set_inuse_and_pinuse(ms, p, dvs);
257       }
258       mem = chunk2mem(p);
259       check_malloced_chunk(ms, mem, nb);
260       goto postaction;
261     }
262 
263     else if (nb < ms->topsize) { /* Split top */
264       size_t rsize = ms->topsize -= nb;
265       mchunkptr p = ms->top;
266       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
267       r->head = rsize | PINUSE_BIT;
268       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
269       mem = chunk2mem(p);
270       check_top_chunk(ms, ms->top);
271       check_malloced_chunk(ms, mem, nb);
272       goto postaction;
273     }
274 
275     mem = sys_alloc(ms, nb);
276 
277   postaction:
278       ;//POSTACTION(ms);
279     return mem;
280   }
281 
282   return 0;
283 }
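/* The "_lockless" variants above assume the caller already owns the mspace
   lock. A minimal usage sketch (mirroring what boost_cont_free and the
   multialloc helpers below do): take the lock once, issue one or more
   lockless calls, then release it.

      if (!PREACTION(ms)) {              // take the mspace lock
         mspace_free_lockless(ms, mem1);
         mspace_free_lockless(ms, mem2);
         POSTACTION(ms);                 // release it
      }
*/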
284 
285 //This function is equal to try_realloc_chunk, but it handles
286 //both a minimum and a desired byte count
287 static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb, size_t des_nb, int can_move)
288 {
289   mchunkptr newp = 0;
290   size_t oldsize = chunksize(p);
291   mchunkptr next = chunk_plus_offset(p, oldsize);
292   if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
293               ok_next(p, next) && ok_pinuse(next))) {
294     if (is_mmapped(p)) {
295       newp = mmap_resize(m, p, des_nb, can_move);
296       if(!newp)   //mmap does not return how many bytes we could reallocate, so go the minimum
297          newp = mmap_resize(m, p, min_nb, can_move);
298     }
299     else if (oldsize >= min_nb) {             /* already big enough */
300       size_t nb = oldsize >= des_nb ? des_nb : oldsize;
301       size_t rsize = oldsize - nb;
302       if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
303         mchunkptr r = chunk_plus_offset(p, nb);
304         set_inuse(m, p, nb);
305         set_inuse(m, r, rsize);
306         dispose_chunk(m, r, rsize);
307       }
308       newp = p;
309     }
310     else if (next == m->top) {  /* extend into top */
311       if (oldsize + m->topsize > min_nb) {
312         size_t nb = (oldsize + m->topsize) > des_nb ? des_nb : (oldsize + m->topsize - MALLOC_ALIGNMENT);
313         size_t newsize = oldsize + m->topsize;
314         size_t newtopsize = newsize - nb;
315         mchunkptr newtop = chunk_plus_offset(p, nb);
316         set_inuse(m, p, nb);
317         newtop->head = newtopsize |PINUSE_BIT;
318         m->top = newtop;
319         m->topsize = newtopsize;
320         newp = p;
321       }
322     }
323     else if (next == m->dv) { /* extend into dv */
324       size_t dvs = m->dvsize;
325       if (oldsize + dvs >= min_nb) {
326         size_t nb = (oldsize + dvs) >= des_nb ? des_nb : (oldsize + dvs);
327         size_t dsize = oldsize + dvs - nb;
328         if (dsize >= MIN_CHUNK_SIZE) {
329           mchunkptr r = chunk_plus_offset(p, nb);
330           mchunkptr n = chunk_plus_offset(r, dsize);
331           set_inuse(m, p, nb);
332           set_size_and_pinuse_of_free_chunk(r, dsize);
333           clear_pinuse(n);
334           m->dvsize = dsize;
335           m->dv = r;
336         }
337         else { /* exhaust dv */
338           size_t newsize = oldsize + dvs;
339           set_inuse(m, p, newsize);
340           m->dvsize = 0;
341           m->dv = 0;
342         }
343         newp = p;
344       }
345     }
346     else if (!cinuse(next)) { /* extend into next free chunk */
347       size_t nextsize = chunksize(next);
348       if (oldsize + nextsize >= min_nb) {
349         size_t nb = (oldsize + nextsize) >= des_nb ? des_nb : (oldsize + nextsize);
350         size_t rsize = oldsize + nextsize - nb;
351         unlink_chunk(m, next, nextsize);
352         if (rsize < MIN_CHUNK_SIZE) {
353           size_t newsize = oldsize + nextsize;
354           set_inuse(m, p, newsize);
355         }
356         else {
357           mchunkptr r = chunk_plus_offset(p, nb);
358           set_inuse(m, p, nb);
359           set_inuse(m, r, rsize);
360           dispose_chunk(m, r, rsize);
361         }
362         newp = p;
363       }
364     }
365   }
366   else {
367     USAGE_ERROR_ACTION(m, chunk2mem(p));
368   }
369   return newp;
370 }
371 
372 ///////////////////////////////////////////////////////////////
373 ///////////////////////////////////////////////////////////////
374 ///////////////////////////////////////////////////////////////
375 //
376 //         NEW FUNCTIONS BASED ON DLMALLOC INTERNALS
377 //
378 ///////////////////////////////////////////////////////////////
379 ///////////////////////////////////////////////////////////////
380 ///////////////////////////////////////////////////////////////
381 
382 #define GET_TRUNCATED_SIZE(ORIG_SIZE, ROUNDTO)     ((ORIG_SIZE)/(ROUNDTO)*(ROUNDTO))
383 #define GET_ROUNDED_SIZE(ORIG_SIZE, ROUNDTO)       ((((ORIG_SIZE)-1)/(ROUNDTO)+1)*(ROUNDTO))
384 #define GET_TRUNCATED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE) & (~(ROUNDTO-1)))
385 #define GET_ROUNDED_PO2_SIZE(ORIG_SIZE, ROUNDTO)   (((ORIG_SIZE - 1) & (~(ROUNDTO-1))) + ROUNDTO)
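/* Worked examples for the rounding macros above (illustrative values):

      GET_TRUNCATED_SIZE(13, 4)     == 12   // round down to a multiple of 4
      GET_ROUNDED_SIZE(13, 4)       == 16   // round up; 12 would stay 12
      GET_TRUNCATED_PO2_SIZE(13, 8) ==  8   // ROUNDTO must be a power of two
      GET_ROUNDED_PO2_SIZE(13, 8)   == 16   // ((13-1) & ~7) + 8
*/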
386 
387 /* Greatest common divisor and least common multiple.
388    CALCULATE_GCD computes the greatest common divisor of two
389    integers using Euclid's algorithm.
390 
391    Pre: A > 0 && B > 0
392    Recommended: A > B*/
393 #define CALCULATE_GCD(A, B, OUT)\
394 {\
395    size_t a = A;\
396    size_t b = B;\
397    do\
398    {\
399       size_t tmp = b;\
400       b = a % b;\
401       a = tmp;\
402    } while (b != 0);\
403 \
404    OUT = a;\
405 }
406 
407 /* CALCULATE_LCM computes the least common multiple of two
408    integers.
409 
410    Pre: A > 0 && B > 0
411    Recommended: A > B*/
412 #define CALCULATE_LCM(A, B, OUT)\
413 {\
414    CALCULATE_GCD(A, B, OUT);\
415    OUT = (A / OUT)*B;\
416 }
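/* A small usage sketch of the macros above (illustrative only):

      size_t lcm;
      CALCULATE_LCM(12, 8, lcm);   // gcd(12, 8) == 4, so lcm == (12/4)*8 == 24

   Both macros expand to statement blocks, so they must be used where a
   compound statement is allowed, and A and B must be nonzero. */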
417 
418 static int calculate_lcm_and_needs_backwards_lcmed
419    (size_t backwards_multiple, size_t received_size, size_t size_to_achieve,
420     size_t *plcm, size_t *pneeds_backwards_lcmed)
421 {
422    /* Now calculate lcm */
423    size_t max = backwards_multiple;
424    size_t min = MALLOC_ALIGNMENT;
425    size_t needs_backwards;
426    size_t needs_backwards_lcmed;
427    size_t lcm;
428    size_t current_forward;
429    /*Swap if necessary*/
430    if(max < min){
431       size_t tmp = min;
432       min = max;
433       max = tmp;
434    }
435    /*Check if it's a power of two*/
436    if((backwards_multiple & (backwards_multiple-1)) == 0){
437       if(0 != (size_to_achieve & ((backwards_multiple-1)))){
438          USAGE_ERROR_ACTION(m, oldp);
439          return 0;
440       }
441 
442       lcm = max;
443       /*If maxbytes can't be achieved and we must settle for a buffer
444       between minbytes and maxbytes, calculate the biggest size
445       that is actually achievable*/
446       current_forward = GET_TRUNCATED_PO2_SIZE(received_size, backwards_multiple);
447       needs_backwards = size_to_achieve - current_forward;
448       assert((needs_backwards % backwards_multiple) == 0);
449       needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(needs_backwards, lcm);
450       *plcm = lcm;
451       *pneeds_backwards_lcmed = needs_backwards_lcmed;
452       return 1;
453    }
454    /*Check if it's a multiple of the alignment*/
455    else if((backwards_multiple & (MALLOC_ALIGNMENT - 1u)) == 0){
456       lcm = backwards_multiple;
457       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
458       //No need to round needs_backwards because backwards_multiple == lcm
459       needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
460       assert((needs_backwards_lcmed & (MALLOC_ALIGNMENT - 1u)) == 0);
461       *plcm = lcm;
462       *pneeds_backwards_lcmed = needs_backwards_lcmed;
463       return 1;
464    }
465    /*Check if it's a multiple of half the alignment*/
466    else if((backwards_multiple & ((MALLOC_ALIGNMENT/2u) - 1u)) == 0){
467       lcm = backwards_multiple*2u;
468       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
469       needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
470       if(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
471       //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
472          needs_backwards_lcmed += backwards_multiple;
473       assert((needs_backwards_lcmed % lcm) == 0);
474       *plcm = lcm;
475       *pneeds_backwards_lcmed = needs_backwards_lcmed;
476       return 1;
477    }
478    /*Check if it's a multiple of a quarter of the alignment*/
479    else if((backwards_multiple & ((MALLOC_ALIGNMENT/4u) - 1u)) == 0){
480       size_t remainder;
481       lcm = backwards_multiple*4u;
482       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
483       needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
484       //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
485          //needs_backwards_lcmed += backwards_multiple;
486       if(0 != (remainder = ((needs_backwards_lcmed & (MALLOC_ALIGNMENT-1))>>(MALLOC_ALIGNMENT/8u)))){
487          if(backwards_multiple & MALLOC_ALIGNMENT/2u){
488             needs_backwards_lcmed += (remainder)*backwards_multiple;
489          }
490          else{
491             needs_backwards_lcmed += (4-remainder)*backwards_multiple;
492          }
493       }
494       assert((needs_backwards_lcmed % lcm) == 0);
495       *plcm = lcm;
496       *pneeds_backwards_lcmed = needs_backwards_lcmed;
497       return 1;
498    }
499    else{
500       CALCULATE_LCM(max, min, lcm);
501       /*If maxbytes can't be achieved and we must settle for a buffer
502       between minbytes and maxbytes, calculate the biggest size
503       that is actually achievable*/
504       current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
505       needs_backwards = size_to_achieve - current_forward;
506       assert((needs_backwards % backwards_multiple) == 0);
507       needs_backwards_lcmed = GET_ROUNDED_SIZE(needs_backwards, lcm);
508       *plcm = lcm;
509       *pneeds_backwards_lcmed = needs_backwards_lcmed;
510       return 1;
511    }
512 }
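/* Worked example of the power-of-two branch above (illustrative; the numbers
   assume MALLOC_ALIGNMENT == 16):

      backwards_multiple = 8, received_size = 40, size_to_achieve = 64
      lcm                   = 16   (the larger of 8 and MALLOC_ALIGNMENT)
      current_forward       = GET_TRUNCATED_PO2_SIZE(40, 8)  == 40
      needs_backwards       = 64 - 40                        == 24
      needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(24, 16)   == 32

   i.e. the block must grow backwards by 32 bytes, a multiple of both the
   requested backwards_multiple and the allocation alignment. */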
513 
514 static void *internal_grow_both_sides
515                          (mstate m
516                          ,allocation_type command
517                          ,void *oldmem
518                          ,size_t minbytes
519                          ,size_t maxbytes
520                          ,size_t *received_size
521                          ,size_t backwards_multiple
522                          ,int only_preferred_backwards)
523 {
524    mchunkptr oldp = mem2chunk(oldmem);
525    size_t oldsize = chunksize(oldp);
526    *received_size = oldsize - overhead_for(oldp);
527    if(minbytes <= *received_size)
528       return oldmem;
529 
530    if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp))) {
531       if(command & BOOST_CONTAINER_EXPAND_FWD){
532          if(try_realloc_chunk_with_min(m, oldp, request2size(minbytes), request2size(maxbytes), 0)){
533             check_inuse_chunk(m, oldp);
534             *received_size = DL_SIZE_IMPL(oldmem);
535             s_allocated_memory += chunksize(oldp) - oldsize;
536             return oldmem;
537          }
538       }
539       else{
540          *received_size = DL_SIZE_IMPL(oldmem);
541          if(*received_size >= maxbytes)
542             return oldmem;
543       }
544 /*
545       Should we check this?
546       if(backwards_multiple &&
547          (0 != (minbytes % backwards_multiple) &&
548           0 != (maxbytes % backwards_multiple)) ){
549          USAGE_ERROR_ACTION(m, oldp);
550          return 0;
551       }
552 */
553       /* We reach here only if forward expansion fails */
554       if(!(command & BOOST_CONTAINER_EXPAND_BWD) || pinuse(oldp)){
555          return 0;
556       }
557       {
558          size_t prevsize = oldp->prev_foot;
559          if ((prevsize & USE_MMAP_BIT) != 0){
560             /*Return failure if the previous chunk was mmapped:
561               mremap does not allow expanding to a fixed address (MREMAP_FIXED)
562               without also allowing the mapping to move (MREMAP_MAYMOVE), which implies copying.*/
563             return 0;
564          }
565          else {
566             mchunkptr prev = chunk_minus_offset(oldp, prevsize);
567             size_t dsize = oldsize + prevsize;
568             size_t needs_backwards_lcmed;
569             size_t lcm;
570 
571             /* Let's calculate the number of extra bytes of data before the current
572             block's beginning. The value is a multiple of backwards_multiple
573             and the alignment*/
574             if(!calculate_lcm_and_needs_backwards_lcmed
575                ( backwards_multiple, *received_size
576                , only_preferred_backwards ? maxbytes : minbytes
577                , &lcm, &needs_backwards_lcmed)
578                || !RTCHECK(ok_address(m, prev))){
579                USAGE_ERROR_ACTION(m, oldp);
580                return 0;
581             }
582             /* Check if previous block has enough size */
583             else if(prevsize < needs_backwards_lcmed){
584                /* preferred size? */
585                return 0;
586             }
587             /* Now take all next space. This must succeed, as we've previously calculated the correct size */
588             if(command & BOOST_CONTAINER_EXPAND_FWD){
589                if(!try_realloc_chunk_with_min(m, oldp, request2size(*received_size), request2size(*received_size), 0)){
590                   assert(0);
591                }
592                check_inuse_chunk(m, oldp);
593                *received_size = DL_SIZE_IMPL(oldmem);
594                s_allocated_memory += chunksize(oldp) - oldsize;
595                oldsize = chunksize(oldp);
596                dsize = oldsize + prevsize;
597             }
598             /* We need a minimum size to split the previous one */
599             if(prevsize >= (needs_backwards_lcmed + MIN_CHUNK_SIZE)){
600                mchunkptr r  = chunk_minus_offset(oldp, needs_backwards_lcmed);
601                size_t rsize = oldsize + needs_backwards_lcmed;
602                size_t newprevsize = dsize - rsize;
603                int prev_was_dv = prev == m->dv;
604 
605                assert(newprevsize >= MIN_CHUNK_SIZE);
606 
607                if (prev_was_dv) {
608                   m->dvsize = newprevsize;
609                }
610                else{/* if ((next->head & INUSE_BITS) == INUSE_BITS) { */
611                   unlink_chunk(m, prev, prevsize);
612                   insert_chunk(m, prev, newprevsize);
613                }
614 
615                set_size_and_pinuse_of_free_chunk(prev, newprevsize);
616                clear_pinuse(r);
617                set_inuse(m, r, rsize);
618                check_malloced_chunk(m, chunk2mem(r), rsize);
619                *received_size = chunksize(r) - overhead_for(r);
620                s_allocated_memory += chunksize(r) - oldsize;
621                return chunk2mem(r);
622             }
623             /* Check if there is no room to create a new block and
624                the whole new block is a multiple of the backwards expansion multiple */
625             else if(prevsize >= needs_backwards_lcmed && !(prevsize % lcm)) {
626                /* Just merge the whole previous block */
627                /* prevsize is multiple of lcm (and backwards_multiple)*/
628                *received_size  += prevsize;
629 
630                if (prev != m->dv) {
631                   unlink_chunk(m, prev, prevsize);
632                }
633                else{
634                   m->dvsize = 0;
635                   m->dv     = 0;
636                }
637                set_inuse(m, prev, dsize);
638                check_malloced_chunk(m, chunk2mem(prev), dsize);
639                s_allocated_memory += chunksize(prev) - oldsize;
640                return chunk2mem(prev);
641             }
642             else{
643                /* Previous block was big enough but there is no room
644                   to create an empty block and taking the whole block does
645                   not fulfill alignment requirements */
646                return 0;
647             }
648          }
649       }
650    }
651    else{
652       USAGE_ERROR_ACTION(m, oldmem);
653       return 0;
654    }
655    return 0;
656 }
657 
658 /* This is similar to mmap_resize but:
659    * It only shrinks
660    * It takes min and max sizes
661    * It takes an additional 'do_commit' argument so the final
662      size can be obtained before doing the real shrink operation.
663 */
664 static int internal_mmap_shrink_in_place(mstate m, mchunkptr oldp, size_t nbmin, size_t nbmax, size_t *received_size, int do_commit)
665 {
666   size_t oldsize = chunksize(oldp);
667   *received_size = oldsize;
668   #if HAVE_MREMAP
669   if (is_small(nbmax)) /* Can't shrink mmap regions below small size */
670     return 0;
671   {
672    size_t effective_min = nbmin > MIN_LARGE_SIZE ? nbmin : MIN_LARGE_SIZE;
673    /* Keep old chunk if big enough but not too big */
674    if (oldsize >= effective_min + SIZE_T_SIZE &&
675          (oldsize - effective_min) <= (mparams.granularity << 1))
676       return 0;
677    /* Now calculate new sizes */
678    {
679       size_t offset = oldp->prev_foot;
680       size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
681       size_t newmmsize = mmap_align(effective_min + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
682       *received_size = newmmsize;
683       if(!do_commit){
684          const int flags = 0; /* placate people compiling -Wunused */
685          char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
686                                        oldmmsize, newmmsize, flags);
687          /*This must always succeed */
688          if(!cp){
689             USAGE_ERROR_ACTION(m, m);
690             return 0;
691          }
692          {
693          mchunkptr newp = (mchunkptr)(cp + offset);
694          size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
695          newp->head = psize;
696          mark_inuse_foot(m, newp, psize);
697          chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
698          chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
699 
700          if (cp < m->least_addr)
701             m->least_addr = cp;
702          if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
703             m->max_footprint = m->footprint;
704          check_mmapped_chunk(m, newp);
705          }
706       }
707     }
708     return 1;
709   }
710   #else  //#if HAVE_MREMAP
711   (void)m;
712   (void)oldp;
713   (void)nbmin;
714   (void)nbmax;
715   (void)received_size;
716   (void)do_commit;
717   return 0;
718   #endif //#if HAVE_MREMAP
719 }
720 
721 static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxbytes, size_t *received_size, int do_commit)
722 {
723    *received_size = chunksize(mem2chunk(oldmem)) - overhead_for(mem2chunk(oldmem));
724    if (minbytes >= MAX_REQUEST || maxbytes >= MAX_REQUEST) {
725       MALLOC_FAILURE_ACTION;
726       return 0;
727    }
728    else if(minbytes < MIN_REQUEST){
729       minbytes = MIN_REQUEST;
730    }
731    if (minbytes > maxbytes) {
732       return 0;
733    }
734 
735    {
736       mchunkptr oldp = mem2chunk(oldmem);
737       size_t oldsize = chunksize(oldp);
738       mchunkptr next = chunk_plus_offset(oldp, oldsize);
739       void* extra = 0;
740 
741       /* Try to either shrink or extend into top. Else malloc-copy-free*/
742       if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
743                   ok_next(oldp, next) && ok_pinuse(next))) {
744          size_t nbmin = request2size(minbytes);
745          size_t nbmax = request2size(maxbytes);
746 
747          if (nbmin > oldsize){
748             /* Return error if old size is too small */
749          }
750          else if (is_mmapped(oldp)){
751             return internal_mmap_shrink_in_place(m, oldp, nbmin, nbmax, received_size, do_commit);
752          }
753          else{ // nbmin <= oldsize /* already big enough*/
754             size_t nb = nbmin;
755             size_t rsize = oldsize - nb;
756             if (rsize >= MIN_CHUNK_SIZE) {
757                if(do_commit){
758                   mchunkptr remainder = chunk_plus_offset(oldp, nb);
759                   set_inuse(m, oldp, nb);
760                   set_inuse(m, remainder, rsize);
761                   s_allocated_memory -= rsize;
762                   extra = chunk2mem(remainder);
763                   mspace_free_lockless(m, extra);
764                   check_inuse_chunk(m, oldp);
765                }
766                *received_size = nb - overhead_for(oldp);
767                return 1;
768             }
769          }
770       }
771       else {
772          USAGE_ERROR_ACTION(m, oldmem);
773       }
774       return 0;
775    }
776 }
777 
778 #define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096
779 #define SQRT_MAX_SIZE_T           (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2))
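/* SQRT_MAX_SIZE_T is used below as a cheap overflow pre-check when computing
   element_req_size * contiguous_elements: if neither factor exceeds
   SQRT_MAX_SIZE_T their product cannot overflow, so the division-based check
   is skipped. Illustration assuming a 64-bit size_t:

      SQRT_MAX_SIZE_T == 0xFFFFFFFF                  // 2^32 - 1
      a <= 0xFFFFFFFF && b <= 0xFFFFFFFF   implies   a*b < 2^64

   Only when (a | b) exceeds SQRT_MAX_SIZE_T is MAX_SIZE_T / a < b evaluated. */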
780 
781 static int internal_node_multialloc
782 (mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
783 	void*     mem;            /* malloced aggregate space */
784 	mchunkptr p;              /* corresponding chunk */
785 	size_t    remainder_size; /* remaining bytes while splitting */
786 	flag_t    was_enabled;    /* to disable mmap */
787 	size_t    elements_per_segment = 0;
788 	size_t    element_req_size = request2size(element_size);
789 	boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
790 
791 	/*Error if wrong element_size parameter */
792 	if (!element_size ||
793 		/*OR Error if n_elements less than contiguous_elements */
794 		((contiguous_elements + 1) > (BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
795 		/* OR Error if integer overflow */
796 		(SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
797 		(MAX_SIZE_T / element_req_size) < contiguous_elements)) {
798 		return 0;
799 	}
800 	switch (contiguous_elements) {
801 	case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
802 	{
803 		/* Default contiguous, just check that we can store at least one element */
804 		elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM / element_req_size;
805 		elements_per_segment += (size_t)(!elements_per_segment);
806 	}
807 	break;
808 	case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
809 		/* All elements should be allocated in a single call */
810 		elements_per_segment = n_elements;
811 		break;
812 	default:
813 		/* Allocate in chunks of "contiguous_elements" */
814 		elements_per_segment = contiguous_elements;
815 	}
816 
817 	{
818 		size_t    i;
819 		size_t next_i;
820 		/*
821 		   Allocate the aggregate chunk.  First disable direct-mmapping so
822 		   malloc won't use it, since we would not be able to later
823 		   free/realloc space internal to a segregated mmap region.
824 		*/
825 		was_enabled = use_mmap(m);
826 		disable_mmap(m);
827 		for (i = 0; i != n_elements; i = next_i)
828 		{
829 			size_t accum_size;
830 			size_t n_elements_left = n_elements - i;
831 			next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
832 			accum_size = element_req_size * (next_i - i);
833 
834 			mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
835 			if (mem == 0) {
836 				BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
837 				while (i) {
838 					void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
839 					--i;
840 					BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
841                s_allocated_memory -= chunksize(mem2chunk(addr));
842 					mspace_free_lockless(m, addr);
843 				}
844 				if (was_enabled)
845 					enable_mmap(m);
846 				return 0;
847 			}
848 			p = mem2chunk(mem);
849 			remainder_size = chunksize(p);
850 			s_allocated_memory += remainder_size;
851 
852 			assert(!is_mmapped(p));
853 			{  /* split out elements */
854 			   //void *mem_orig = mem;
855 			   //boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
856 				size_t num_elements = next_i - i;
857 
858 				size_t num_loops = num_elements - 1;
859 				remainder_size -= element_req_size * num_loops;
860 				while (num_loops) {
861 					--num_loops;
862 					//void **mem_prev = ((void**)mem);
863 					set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
864 					BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem);
865 					p = chunk_plus_offset(p, element_req_size);
866 					mem = chunk2mem(p);
867 					//*mem_prev = mem;
868 				}
869 				set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
870 				BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem);
871 				//BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
872 			}
873 		}
874 		if (was_enabled)
875 			enable_mmap(m);
876 	}
877 	return 1;
878 }
879 
880 #define BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
881 #ifndef BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
882 
883 #define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
884    *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
885 
886 //This function is based on internal_bulk_free,
887 //replacing iteration over array[] with a boost_cont_memchain.
888 //Instead of returning the unallocated nodes, it returns a chain of non-deallocated nodes.
889 //After forward merging, backwards merging is also tried
890 static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
891 {
892 #if FOOTERS
893 	boost_cont_memchain ret_chain;
894 	BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
895 #endif
896 	if (!PREACTION(m)) {
897 		boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
898 		while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
899 			void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
900 			mchunkptr a_p = mem2chunk(a_mem);
901 			size_t psize = chunksize(a_p);
902 #if FOOTERS
903 			if (get_mstate_for(a_p) != m) {
904 				BOOST_CONTAINER_MEMIT_NEXT(a_it);
905 				BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
906 				continue;
907 			}
908 #endif
909 			check_inuse_chunk(m, a_p);
910 			if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
911 				while (1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
912 					boost_cont_memchain_it b_it = a_it;
913 					BOOST_CONTAINER_MEMIT_NEXT(b_it);
914 					if (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)) {
915 						void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
916 						mchunkptr b_p = mem2chunk(b_mem);
917 						if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
918 							psize += chunksize(b_p);
919 							set_inuse(m, a_p, psize);
920 							BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
921 							continue;
922 						}
923 						if (RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))) {
924 							/* b chunk is contiguous and previous so a's size can be added to b */
925 							if (a_p == next_chunk(b_p)) {
926 								psize += chunksize(b_p);
927 								set_inuse(m, b_p, psize);
928 								a_it = b_it;
929 								a_p = b_p;
930 								a_mem = b_mem;
931 								continue;
932 							}
933 						}
934 					}
935 					/* Normal deallocation starts again in the outer loop */
936 					a_it = b_it;
937 					s_allocated_memory -= psize;
938 					dispose_chunk(m, a_p, psize);
939 					break;
940 				}
941 			}
942 			else {
943 				CORRUPTION_ERROR_ACTION(m);
944 				break;
945 			}
946 		}
947 		if (should_trim(m, m->topsize))
948 			sys_trim(m, 0);
949 		POSTACTION(m);
950 	}
951 #if FOOTERS
952 	{
953 		boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
954 		BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
955 		BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
956 		(pchain
957 			, last_pchain
958 			, BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
959 			, BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
960 			, BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
961 		);
962 	}
963 #endif
964 }
965 
966 #else	//BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
967 
968 //This function is based on internal_bulk_free,
969 //replacing iteration over array[] with a boost_cont_memchain.
970 //Instead of returning the unallocated nodes, it returns a chain of non-deallocated nodes.
971 //After forward merging, backwards merging is also tried
972 static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
973 {
974 	if (!PREACTION(m)) {
975 		boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
976 		while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
977 			void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
978 			BOOST_CONTAINER_MEMIT_NEXT(a_it);
979          s_allocated_memory -= chunksize(mem2chunk(a_mem));
980 			mspace_free_lockless(m, a_mem);
981 		}
982 		POSTACTION(m);
983 	}
984 }
985 
986 #endif	//BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
987 
988 static int internal_multialloc_arrays
989    (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
990    void*     mem;            /* malloced aggregate space */
991    mchunkptr p;              /* corresponding chunk */
992    size_t    remainder_size; /* remaining bytes while splitting */
993    flag_t    was_enabled;    /* to disable mmap */
994    size_t    size;
995    size_t boost_cont_multialloc_segmented_malloc_size;
996    size_t max_size;
997 
998    /* Check overflow */
999    if(!element_size){
1000       return 0;
1001    }
1002    max_size = MAX_REQUEST/element_size;
1003    /* Different sizes*/
1004    switch(contiguous_elements){
1005       case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
1006          /* Use default contiguous mem */
1007          boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
1008       break;
1009       case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
1010          boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
1011       break;
1012       default:
1013          if(max_size < contiguous_elements){
1014             return 0;
1015          }
1016          else{
1017             /* The suggested buffer is just the element count times the element size */
1018             boost_cont_multialloc_segmented_malloc_size = element_size*contiguous_elements;
1019          }
1020    }
1021 
1022    {
1023       size_t    i;
1024       size_t next_i;
1025       /*
1026          Allocate the aggregate chunk.  First disable direct-mmapping so
1027          malloc won't use it, since we would not be able to later
1028          free/realloc space internal to a segregated mmap region.
1029       */
1030       was_enabled = use_mmap(m);
1031       disable_mmap(m);
1032       for(i = 0, next_i = 0; i != n_elements; i = next_i)
1033       {
1034          int error = 0;
1035          size_t accum_size;
1036          for(accum_size = 0; next_i != n_elements; ++next_i){
1037             size_t cur_array_size   = sizes[next_i];
1038             if(max_size < cur_array_size){
1039                error = 1;
1040                break;
1041             }
1042             else{
1043                size_t reqsize = request2size(cur_array_size*element_size);
1044                if(((boost_cont_multialloc_segmented_malloc_size - CHUNK_OVERHEAD) - accum_size) < reqsize){
1045                   if(!accum_size){
1046                      accum_size += reqsize;
1047                      ++next_i;
1048                   }
1049                   break;
1050                }
1051                accum_size += reqsize;
1052             }
1053          }
1054 
1055          mem = error ? 0 : mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
1056          if (mem == 0){
1057             boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
1058             while(i--){
1059                void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
1060                BOOST_CONTAINER_MEMIT_NEXT(it);
1061                s_allocated_memory -= chunksize(mem2chunk(addr));
1062                mspace_free_lockless(m, addr);
1063             }
1064             if (was_enabled)
1065                enable_mmap(m);
1066             return 0;
1067          }
1068          p = mem2chunk(mem);
1069          remainder_size = chunksize(p);
1070          s_allocated_memory += remainder_size;
1071 
1072          assert(!is_mmapped(p));
1073 
1074          {  /* split out elements */
1075             void *mem_orig = mem;
1076             boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
1077             size_t num_elements = next_i-i;
1078 
1079             for(++i; i != next_i; ++i) {
1080                void **mem_prev = ((void**)mem);
1081                size = request2size(sizes[i]*element_size);
1082                remainder_size -= size;
1083                set_size_and_pinuse_of_inuse_chunk(m, p, size);
1084                p = chunk_plus_offset(p, size);
1085                mem = chunk2mem(p);
1086                *mem_prev = mem;
1087             }
1088             set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
1089             BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
1090          }
1091       }
1092       if (was_enabled)
1093          enable_mmap(m);
1094    }
1095    return 1;
1096 }
1097 
1098 int boost_cont_multialloc_arrays
1099    (size_t n_elements, const size_t *sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1100 {
1101    int ret = 0;
1102    mstate ms = (mstate)gm;
1103    ensure_initialization();
1104    if (!ok_magic(ms)) {
1105       USAGE_ERROR_ACTION(ms,ms);
1106    }
1107    else if (!PREACTION(ms)) {
1108       ret = internal_multialloc_arrays(ms, n_elements, sizes, element_size, contiguous_elements, pchain);
1109       POSTACTION(ms);
1110    }
1111    return ret;
1112 }
1113 
1114 
1115 /*Doug Lea malloc extensions*/
1116 static boost_cont_malloc_stats_t get_malloc_stats(mstate m)
1117 {
1118    boost_cont_malloc_stats_t ret = { 0, 0, 0 };
1119    ensure_initialization();
1120    if (!PREACTION(m)) {
1121       size_t maxfp = 0;
1122       size_t fp = 0;
1123       size_t used = 0;
1124       check_malloc_state(m);
1125       if (is_initialized(m)) {
1126          msegmentptr s = &m->seg;
1127          maxfp = m->max_footprint;
1128          fp = m->footprint;
1129          used = fp - (m->topsize + TOP_FOOT_SIZE);
1130 
1131          while (s != 0) {
1132             mchunkptr q = align_as_chunk(s->base);
1133             while (segment_holds(s, q) &&
1134                   q != m->top && q->head != FENCEPOST_HEAD) {
1135                if (!cinuse(q))
1136                used -= chunksize(q);
1137                q = next_chunk(q);
1138             }
1139             s = s->next;
1140          }
1141       }
1142 
1143       ret.max_system_bytes   = maxfp;
1144       ret.system_bytes       = fp;
1145       ret.in_use_bytes       = used;
1146       POSTACTION(m);
1147    }
1148    return ret;
1149 }
1150 
1151 size_t boost_cont_size(const void *p)
1152 {  return DL_SIZE_IMPL(p);  }
1153 
1154 void* boost_cont_malloc(size_t bytes)
1155 {
1156    size_t received_bytes;
1157    ensure_initialization();
1158    return boost_cont_allocation_command
1159       (BOOST_CONTAINER_ALLOCATE_NEW, 1, bytes, bytes, &received_bytes, 0).first;
1160 }
1161 
1162 void boost_cont_free(void* mem)
1163 {
1164    mstate ms = (mstate)gm;
1165    if (!ok_magic(ms)) {
1166       USAGE_ERROR_ACTION(ms,ms);
1167    }
1168    else if (!PREACTION(ms)) {
1169       if(mem)
1170          s_allocated_memory -= chunksize(mem2chunk(mem));
1171       mspace_free_lockless(ms, mem);
1172       POSTACTION(ms);
1173    }
1174 }
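/* Minimal usage sketch of the basic interface above (illustrative only):

      void *p = boost_cont_malloc(100);
      if (p) {
         size_t usable = boost_cont_size(p);   // real payload size, >= 100
         (void)usable;
         boost_cont_free(p);
      }

   boost_cont_malloc may hand out more usable memory than requested; the real
   size can be queried with boost_cont_size(). */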
1175 
1176 void* boost_cont_memalign(size_t bytes, size_t alignment)
1177 {
1178    void *addr;
1179    ensure_initialization();
1180    addr = mspace_memalign(gm, alignment, bytes);
1181    if(addr){
1182       s_allocated_memory += chunksize(mem2chunk(addr));
1183    }
1184    return addr;
1185 }
1186 
1187 int boost_cont_multialloc_nodes
1188    (size_t n_elements, size_t elem_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1189 {
1190    int ret = 0;
1191    mstate ms = (mstate)gm;
1192    ensure_initialization();
1193    if (!ok_magic(ms)) {
1194       USAGE_ERROR_ACTION(ms,ms);
1195    }
1196    else if (!PREACTION(ms)) {
1197       ret = internal_node_multialloc(ms, n_elements, elem_size, contiguous_elements, pchain);
1198       POSTACTION(ms);
1199    }
1200    return ret;
1201 }
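/* Usage sketch for the node multi-allocation API (illustrative; the memchain
   macros are the same ones used throughout this file):

      boost_cont_memchain chain;
      BOOST_CONTAINER_MEMCHAIN_INIT(&chain);
      if (boost_cont_multialloc_nodes(32, sizeof(int),
            BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS, &chain)) {
         boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(&chain);
         while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(&chain, it)) {
            void *node = BOOST_CONTAINER_MEMIT_ADDR(it);   // one elem_size node
            (void)node;
            BOOST_CONTAINER_MEMIT_NEXT(it);
         }
         boost_cont_multidealloc(&chain);   // frees every node in the chain
      }
*/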
1202 
1203 size_t boost_cont_footprint()
1204 {
1205    return ((mstate)gm)->footprint;
1206 }
1207 
1208 size_t boost_cont_allocated_memory()
1209 {
1210    size_t alloc_mem = 0;
1211    mstate m = (mstate)gm;
1212    ensure_initialization();
1213    if (!ok_magic(m)) {
1214       USAGE_ERROR_ACTION(m,m);
1215    }
1216 
1217 
1218    if (!PREACTION(m)) {
1219       check_malloc_state(m);
1220       if (is_initialized(m)) {
1221       size_t nfree = SIZE_T_ONE; /* top always free */
1222       size_t mfree = m->topsize + TOP_FOOT_SIZE;
1223       size_t sum = mfree;
1224       msegmentptr s = &m->seg;
1225       while (s != 0) {
1226          mchunkptr q = align_as_chunk(s->base);
1227          while (segment_holds(s, q) &&
1228                q != m->top && q->head != FENCEPOST_HEAD) {
1229             size_t sz = chunksize(q);
1230             sum += sz;
1231             if (!is_inuse(q)) {
1232             mfree += sz;
1233             ++nfree;
1234             }
1235             q = next_chunk(q);
1236          }
1237          s = s->next;
1238       }
1239       {
1240          size_t uordblks = m->footprint - mfree;
1241          if(nfree)
1242             alloc_mem = (size_t)(uordblks - (nfree-1)*TOP_FOOT_SIZE);
1243          else
1244             alloc_mem = uordblks;
1245          }
1246       }
1247 
1248       POSTACTION(m);
1249    }
1250    return alloc_mem;
1251 }
1252 
1253 size_t boost_cont_chunksize(const void *p)
1254 {  return chunksize(mem2chunk(p));   }
1255 
1256 int boost_cont_all_deallocated()
1257 {  return !s_allocated_memory;  }
1258 
1259 boost_cont_malloc_stats_t boost_cont_malloc_stats()
1260 {
1261   mstate ms = (mstate)gm;
1262   if (ok_magic(ms)) {
1263     return get_malloc_stats(ms);
1264   }
1265   else {
1266     boost_cont_malloc_stats_t r = { 0, 0, 0 };
1267     USAGE_ERROR_ACTION(ms,ms);
1268     return r;
1269   }
1270 }
1271 
1272 size_t boost_cont_in_use_memory()
1273 {  return s_allocated_memory;   }
1274 
1275 int boost_cont_trim(size_t pad)
1276 {
1277    ensure_initialization();
1278    return dlmalloc_trim(pad);
1279 }
1280 
1281 int boost_cont_grow
1282    (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received)
1283 {
1284    mstate ms = (mstate)gm;
1285    if (!ok_magic(ms)) {
1286       USAGE_ERROR_ACTION(ms,ms);
1287       return 0;
1288    }
1289 
1290    if (!PREACTION(ms)) {
1291       mchunkptr p = mem2chunk(oldmem);
1292       size_t oldsize = chunksize(p);
1293       p = try_realloc_chunk_with_min(ms, p, request2size(minbytes), request2size(maxbytes), 0);
1294       POSTACTION(ms);
1295       if(p){
1296          check_inuse_chunk(ms, p);
1297          *received = DL_SIZE_IMPL(oldmem);
1298          s_allocated_memory += chunksize(p) - oldsize;
1299       }
1300       return 0 != p;
1301    }
1302    return 0;
1303 }
1304 
1305 int boost_cont_shrink
1306    (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received, int do_commit)
1307 {
1308    mstate ms = (mstate)gm;
1309    if (!ok_magic(ms)) {
1310       USAGE_ERROR_ACTION(ms,ms);
1311       return 0;
1312    }
1313 
1314    if (!PREACTION(ms)) {
1315       int ret = internal_shrink(ms, oldmem, minbytes, maxbytes, received, do_commit);
1316       POSTACTION(ms);
1317       return 0 != ret;
1318    }
1319    return 0;
1320 }
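/* Usage sketch (illustrative; 'p', 'minbytes' and 'maxbytes' are hypothetical):
   first ask how small the buffer could become (do_commit == 0), then actually
   release the tail (do_commit == 1).

      size_t received = 0;
      if (boost_cont_shrink(p, minbytes, maxbytes, &received, 0)) {
         // 'received' is the usable size the block would have after shrinking
         boost_cont_shrink(p, minbytes, maxbytes, &received, 1);
      }
*/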
1321 
1322 
1323 void* boost_cont_alloc
1324    (size_t minbytes, size_t preferred_bytes, size_t *received_bytes)
1325 {
1326    //ensure_initialization provided by boost_cont_allocation_command
1327    return boost_cont_allocation_command
1328       (BOOST_CONTAINER_ALLOCATE_NEW, 1, minbytes, preferred_bytes, received_bytes, 0).first;
1329 }
1330 
1331 void boost_cont_multidealloc(boost_cont_memchain *pchain)
1332 {
1333    mstate ms = (mstate)gm;
1334    if (!ok_magic(ms)) {
1335       (void)ms;
1336       USAGE_ERROR_ACTION(ms,ms);
1337    }
1338    internal_multialloc_free(ms, pchain);
1339 }
1340 
1341 int boost_cont_malloc_check()
1342 {
1343 #ifdef DEBUG
1344    mstate ms = (mstate)gm;
1345    ensure_initialization();
1346    if (!ok_magic(ms)) {
1347       (void)ms;
1348       USAGE_ERROR_ACTION(ms,ms);
1349       return 0;
1350    }
1351    check_malloc_state(ms);
1352    return 1;
1353 #else
1354    return 1;
1355 #endif
1356 }
1357 
1358 
1359 boost_cont_command_ret_t boost_cont_allocation_command
1360    (allocation_type command, size_t sizeof_object, size_t limit_size
1361    , size_t preferred_size, size_t *received_size, void *reuse_ptr)
1362 {
1363    boost_cont_command_ret_t ret = { 0, 0 };
1364    ensure_initialization();
1365    if(command & (BOOST_CONTAINER_SHRINK_IN_PLACE | BOOST_CONTAINER_TRY_SHRINK_IN_PLACE)){
1366       int success = boost_cont_shrink( reuse_ptr, preferred_size, limit_size
1367                              , received_size, (command & BOOST_CONTAINER_SHRINK_IN_PLACE));
1368       ret.first = success ? reuse_ptr : 0;
1369       return ret;
1370    }
1371 
1372    *received_size = 0;
1373 
1374    if(limit_size > preferred_size)
1375       return ret;
1376 
1377    {
1378       mstate ms = (mstate)gm;
1379 
1380       /*Expand in place*/
1381       if (!PREACTION(ms)) {
1382          #if FOOTERS
1383          if(reuse_ptr){
1384             mstate m = get_mstate_for(mem2chunk(reuse_ptr));
1385             if (!ok_magic(m)) {
1386                USAGE_ERROR_ACTION(m, reuse_ptr);
1387                return ret;
1388             }
1389          }
1390          #endif
1391          if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1392             void *r = internal_grow_both_sides
1393                ( ms, command, reuse_ptr, limit_size
1394                , preferred_size, received_size, sizeof_object, 1);
1395             if(r){
1396                ret.first  = r;
1397                ret.second = 1;
1398                goto postaction;
1399             }
1400          }
1401 
1402          if(command & BOOST_CONTAINER_ALLOCATE_NEW){
1403             void *addr = mspace_malloc_lockless(ms, preferred_size);
1404             if(!addr)   addr = mspace_malloc_lockless(ms, limit_size);
1405             if(addr){
1406                s_allocated_memory += chunksize(mem2chunk(addr));
1407                *received_size = DL_SIZE_IMPL(addr);
1408             }
1409             ret.first  = addr;
1410             ret.second = 0;
1411             if(addr){
1412                goto postaction;
1413             }
1414          }
1415 
1416          //Now try to expand both sides with min size
1417          if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1418             void *r = internal_grow_both_sides
1419                ( ms, command, reuse_ptr, limit_size
1420                , preferred_size, received_size, sizeof_object, 0);
1421             if(r){
1422                ret.first  = r;
1423                ret.second = 1;
1424                goto postaction;
1425             }
1426          }
1427          postaction:
1428          POSTACTION(ms);
1429       }
1430    }
1431    return ret;
1432 }
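/* Usage sketch (illustrative; 'old' and the byte counts are hypothetical):
   try to expand an existing buffer forward in place, falling back to a fresh
   allocation if that fails.

      size_t received = 0;
      boost_cont_command_ret_t r = boost_cont_allocation_command
         ( BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_ALLOCATE_NEW
         , 1          // sizeof_object: used as the backwards expansion multiple
         , 200        // limit_size: minimum acceptable size
         , 500        // preferred_size
         , &received, old);
      if (r.first) {
         // r.second != 0: 'old' was expanded in place and its data is preserved
         // r.second == 0: r.first is a brand new buffer of 'received' usable bytes
      }
*/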
1433 
1434 int boost_cont_mallopt(int param_number, int value)
1435 {
1436   return change_mparam(param_number, value);
1437 }
1438 
1439 void *boost_cont_sync_create()
1440 {
1441    void *p = boost_cont_malloc(sizeof(MLOCK_T));
1442    if(p){
1443       if(0 != INITIAL_LOCK((MLOCK_T*)p)){
1444          boost_cont_free(p);
1445          p = 0;
1446       }
1447    }
1448    return p;
1449 }
1450 
1451 void boost_cont_sync_destroy(void *sync)
1452 {
1453    if(sync){
1454       (void)DESTROY_LOCK((MLOCK_T*)sync);
1455       boost_cont_free(sync);
1456    }
1457 }
1458 
1459 int boost_cont_sync_lock(void *sync)
1460 {  return 0 == (ACQUIRE_LOCK((MLOCK_T*)sync));  }
1461 
1462 void boost_cont_sync_unlock(void *sync)
1463 {  RELEASE_LOCK((MLOCK_T*)sync);  }
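/* Usage sketch of the portable lock helpers above (illustrative only):

      void *sync = boost_cont_sync_create();
      if (sync) {
         if (boost_cont_sync_lock(sync)) {
            // ... critical section ...
            boost_cont_sync_unlock(sync);
         }
         boost_cont_sync_destroy(sync);
      }
*/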
1464 
1465 int boost_cont_global_sync_lock()
1466 {
1467    int ret;
1468    ensure_initialization();
1469    ret = ACQUIRE_MALLOC_GLOBAL_LOCK();
1470    return 0 == ret;
1471 }
1472 
1473 void boost_cont_global_sync_unlock()
1474 {
1475    RELEASE_MALLOC_GLOBAL_LOCK()
1476 }
1477 
1478 //#ifdef DL_DEBUG_DEFINED
1479 //   #undef DEBUG
1480 //#endif
1481 
1482 #ifdef _MSC_VER
1483 #pragma warning (pop)
1484 #endif
1485