/*
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef BumpPointerAllocator_h
#define BumpPointerAllocator_h

#include <wtf/PageAllocation.h>

namespace WTF {

#define MINIMUM_BUMP_POOL_SIZE 0x1000

class BumpPointerPool {
public:
    // ensureCapacity will check whether the current pool has capacity to
    // allocate 'size' bytes of memory.  If it does not, it will attempt to
    // allocate a new pool (which will be added to this one in a chain).
    //
    // If allocation fails (out of memory) this method will return null.
    // If the return value is non-null, then callers should update any
    // references they have to this current (possibly full) BumpPointerPool
    // to instead point to the newly returned BumpPointerPool.
    BumpPointerPool* ensureCapacity(size_t size)
    {
        void* allocationEnd = static_cast<char*>(m_current) + size;
        ASSERT(allocationEnd > m_current); // check for overflow
        if (allocationEnd <= static_cast<void*>(this))
            return this;
        return ensureCapacityCrossPool(this, size);
    }

    // alloc should only be called after calling ensureCapacity; as such
    // alloc will never fail.
    void* alloc(size_t size)
    {
        void* current = m_current;
        void* allocationEnd = static_cast<char*>(current) + size;
        ASSERT(allocationEnd > current); // check for overflow
        ASSERT(allocationEnd <= static_cast<void*>(this));
        m_current = allocationEnd;
        return current;
    }

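    // A minimal calling sketch (illustrative only; 'Node' is a hypothetical
    // client type, not part of this API).  Note how the pool pointer is
    // re-assigned from ensureCapacity before alloc is used:
    //
    //     pool = pool->ensureCapacity(sizeof(Node));
    //     if (!pool)
    //         return 0; // out of memory
    //     Node* node = static_cast<Node*>(pool->alloc(sizeof(Node)));
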
    // The dealloc method releases memory allocated using alloc.  Memory
    // must be released in a LIFO fashion, e.g. if the client calls alloc
    // four times, returning pointers A, B, C and D, then the only valid
    // order in which these may be deallocated is D, C, B, A.
    //
    // The client may optionally skip some deallocations.  In the example
    // above, it would be valid to only explicitly dealloc C, A (D being
    // deallocated along with C, and B along with A).
    //
    // If the pointer was not allocated from this pool (or pools) then dealloc
    // will CRASH().  Callers should update any references they have to
    // this current BumpPointerPool to instead point to the returned
    // BumpPointerPool.
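    //
    // For instance (an illustrative sketch, assuming A, B, C and D were
    // returned by four earlier alloc calls on 'pool', in that order):
    //
    //     pool = pool->dealloc(C); // releases D, then C
    //     pool = pool->dealloc(A); // releases B, then A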
    BumpPointerPool* dealloc(void* position)
    {
        if ((position >= m_start) && (position <= static_cast<void*>(this))) {
            ASSERT(position <= m_current);
            m_current = position;
            return this;
        }
        return deallocCrossPool(this, position);
    }

private:
    // Placement operator new, returns the last 'size' bytes of allocation for use as this.
    void* operator new(size_t size, const PageAllocation& allocation)
    {
        ASSERT(size < allocation.size());
        return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
    }

    BumpPointerPool(const PageAllocation& allocation)
        : m_current(allocation.base())
        , m_start(allocation.base())
        , m_next(0)
        , m_previous(0)
        , m_allocation(allocation)
    {
    }

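    // create() rounds the requested capacity up to a power-of-two pool size.
    // For instance (an illustrative calculation, not additional API): a request
    // for 0x1800 bytes becomes 0x1800 + sizeof(BumpPointerPool), which is then
    // rounded up to the next power of two no smaller than MINIMUM_BUMP_POOL_SIZE,
    // giving a 0x2000 byte pool allocation.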
    static BumpPointerPool* create(size_t minimumCapacity = 0)
    {
        // Add size of BumpPointerPool object, check for overflow.
        minimumCapacity += sizeof(BumpPointerPool);
        if (minimumCapacity < sizeof(BumpPointerPool))
            return 0;

        size_t poolSize = MINIMUM_BUMP_POOL_SIZE;
        while (poolSize < minimumCapacity) {
            poolSize <<= 1;
            // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
            ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
            if (!poolSize)
                return 0;
        }

        PageAllocation allocation = PageAllocation::allocate(poolSize);
        if (!!allocation)
            return new(allocation) BumpPointerPool(allocation);
        return 0;
    }

    void shrink()
    {
        ASSERT(!m_previous);
        m_current = m_start;
        while (m_next) {
            BumpPointerPool* nextNext = m_next->m_next;
            m_next->destroy();
            m_next = nextNext;
        }
    }

    void destroy()
    {
        m_allocation.deallocate();
    }

    static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
    {
        // The pool passed should not have capacity, so we'll start with the next one.
        ASSERT(previousPool);
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
        ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
        BumpPointerPool* pool = previousPool->m_next;

        while (true) {
            if (!pool) {
                // We've run to the end; allocate a new pool.
                pool = BumpPointerPool::create(size);
                previousPool->m_next = pool;
                if (pool)
                    pool->m_previous = previousPool;
                return pool;
            }

            // Check whether this pool has capacity for this allocation.
            void* current = pool->m_current;
            void* allocationEnd = static_cast<char*>(current) + size;
            ASSERT(allocationEnd > current); // check for overflow
            if (allocationEnd <= static_cast<void*>(pool))
                return pool;
        }
    }

    static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
    {
        // Should only be called if position is not in the current pool.
        ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));

        while (true) {
            // Unwind the current pool to the start, move back in the chain to the previous pool.
            pool->m_current = pool->m_start;
            pool = pool->m_previous;

            // position was nowhere in the chain!
            if (!pool)
                CRASH();

            if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
                ASSERT(position <= pool->m_current);
                pool->m_current = position;
                return pool;
            }
        }
    }

    void* m_current;
    void* m_start;
    BumpPointerPool* m_next;
    BumpPointerPool* m_previous;
    PageAllocation m_allocation;

    friend class BumpPointerAllocator;
};

// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
// can be used for LIFO (stack-like) allocation.
//
// To begin allocating using this class call startAllocator().  The result
// of this method will be null if the initial pool allocation fails, or a
// pointer to a BumpPointerPool object that can be used to perform
// allocations.  Whilst running, no memory will be released until
// stopAllocator() is called.  At this point all allocations made through
// this allocator will be reaped, and underlying memory may be freed.
//
// (In practice we will still hold on to the initial pool to allow allocation
// to be quickly restarted, but additional pools will be freed.)
//
// This allocator is non-reentrant; it is incumbent on the client to ensure
// startAllocator() is not called again until stopAllocator() has been called.
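//
// A minimal usage sketch (illustrative only; 'Frame' and its size are
// hypothetical and not part of this API):
//
//     BumpPointerAllocator allocator;
//     BumpPointerPool* pool = allocator.startAllocator();
//     if (pool) {
//         pool = pool->ensureCapacity(sizeof(Frame));
//         if (pool) {
//             Frame* frame = static_cast<Frame*>(pool->alloc(sizeof(Frame)));
//             // ... use frame ...
//             pool = pool->dealloc(frame);
//         }
//     }
//     allocator.stopAllocator();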
class BumpPointerAllocator {
public:
    BumpPointerAllocator()
        : m_head(0)
    {
    }

    ~BumpPointerAllocator()
    {
        if (m_head)
            m_head->destroy();
    }

    BumpPointerPool* startAllocator()
    {
        if (!m_head)
            m_head = BumpPointerPool::create();
        return m_head;
    }

    void stopAllocator()
    {
        if (m_head)
            m_head->shrink();
    }

private:
    BumpPointerPool* m_head;
};

}

using WTF::BumpPointerAllocator;

#endif // BumpPointerAllocator_h