1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef ExecutableAllocator_h
27 #define ExecutableAllocator_h
28
29 #if ENABLE(ASSEMBLER)
30
31 #include <wtf/Assertions.h>
32 #include <wtf/PassRefPtr.h>
33 #include <wtf/RefCounted.h>
34 #include <wtf/Vector.h>
35
36 #include <limits>
37
38 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
39 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
40
41 namespace JSC {
42
43 class ExecutablePool : public RefCounted<ExecutablePool> {
44 private:
45 struct Allocation {
46 char* pages;
47 size_t size;
48 };
49 typedef Vector<Allocation, 2> AllocationList;
50
51 public:
create(size_t n)52 static PassRefPtr<ExecutablePool> create(size_t n)
53 {
54 return adoptRef(new ExecutablePool(n));
55 }
56
alloc(size_t n)57 void* alloc(size_t n)
58 {
59 ASSERT(m_freePtr <= m_end);
60
61 // Round 'n' up to a multiple of word size; if all allocations are of
62 // word sized quantities, then all subsequent allocations will be aligned.
63 n = roundUpAllocationSize(n, sizeof(void*));
64
65 if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
66 void* result = m_freePtr;
67 m_freePtr += n;
68 return result;
69 }
70
71 // Insufficient space to allocate in the existing pool
72 // so we need allocate into a new pool
73 return poolAllocate(n);
74 }
75
~ExecutablePool()76 ~ExecutablePool()
77 {
78 AllocationList::const_iterator end = m_pools.end();
79 for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
80 ExecutablePool::systemRelease(*ptr);
81 }
82
available()83 size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
84
85 private:
86 static Allocation systemAlloc(size_t n);
87 static void systemRelease(const Allocation& alloc);
88
roundUpAllocationSize(size_t request,size_t granularity)89 inline size_t roundUpAllocationSize(size_t request, size_t granularity)
90 {
91 if ((std::numeric_limits<size_t>::max() - granularity) <= request)
92 CRASH(); // Allocation is too large
93
94 // Round up to next page boundary
95 size_t size = request + (granularity - 1);
96 size = size & ~(granularity - 1);
97 ASSERT(size >= request);
98 return size;
99 }
100
101 ExecutablePool(size_t n);
102
103 void* poolAllocate(size_t n);
104
105 char* m_freePtr;
106 char* m_end;
107 AllocationList m_pools;
108 };
109
110 class ExecutableAllocator {
111 public:
112 static size_t pageSize;
ExecutableAllocator()113 ExecutableAllocator()
114 {
115 if (!pageSize)
116 intializePageSize();
117 m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
118 }
119
poolForSize(size_t n)120 PassRefPtr<ExecutablePool> poolForSize(size_t n)
121 {
122 // Try to fit in the existing small allocator
123 if (n < m_smallAllocationPool->available())
124 return m_smallAllocationPool;
125
126 // If the request is large, we just provide a unshared allocator
127 if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
128 return ExecutablePool::create(n);
129
130 // Create a new allocator
131 RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
132
133 // If the new allocator will result in more free space than in
134 // the current small allocator, then we will use it instead
135 if ((pool->available() - n) > m_smallAllocationPool->available())
136 m_smallAllocationPool = pool;
137 return pool.release();
138 }
139
140 private:
141 RefPtr<ExecutablePool> m_smallAllocationPool;
142 static void intializePageSize();
143 };
144
ExecutablePool(size_t n)145 inline ExecutablePool::ExecutablePool(size_t n)
146 {
147 size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
148 Allocation mem = systemAlloc(allocSize);
149 m_pools.append(mem);
150 m_freePtr = mem.pages;
151 if (!m_freePtr)
152 CRASH(); // Failed to allocate
153 m_end = m_freePtr + allocSize;
154 }
155
poolAllocate(size_t n)156 inline void* ExecutablePool::poolAllocate(size_t n)
157 {
158 size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
159
160 Allocation result = systemAlloc(allocSize);
161 if (!result.pages)
162 CRASH(); // Failed to allocate
163
164 ASSERT(m_end >= m_freePtr);
165 if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
166 // Replace allocation pool
167 m_freePtr = result.pages + n;
168 m_end = result.pages + allocSize;
169 }
170
171 m_pools.append(result);
172 return result.pages;
173 }
174
175 }
176
177 #endif // ENABLE(ASSEMBLER)
178
#endif // ExecutableAllocator_h
180