/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if PLATFORM(IPHONE)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

namespace JSC {
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of the granularity, which must be a
    // power of two (e.g. the machine page size, or sizeof(void*)).
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
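
// A worked example of the rounding above (illustrative only, not part of
// the original header): with a granularity of 4096,
//
//     roundUpAllocationSize(5, 4096)    == 4096
//     roundUpAllocationSize(4096, 4096) == 4096
//     roundUpAllocationSize(4097, 4096) == 8192
//
// Adding (granularity - 1) and then masking off the low bits only works
// because the granularity is a power of two.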

}

#if ENABLE(ASSEMBLER)

namespace JSC {

class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of word size; if all allocations are of
        // word sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }
        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(n);
    }

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    // Space remaining in the current chunk. Once a pool spans more than one
    // system allocation it reports zero, so ExecutableAllocator stops trying
    // to share it.
    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};
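
// A minimal usage sketch (illustrative only; 'pool' and 'code' are
// hypothetical names, not part of this header):
//
//     RefPtr<ExecutablePool> pool = ExecutablePool::create(4096);
//     void* code = pool->alloc(64); // word-aligned executable memory
//
// All pages backing the pool are released via systemRelease() when the
// last RefPtr to it goes away.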

class ExecutableAllocator {
    enum ProtectionSetting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            initializePageSize();
        m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
    }

    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator.
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator.
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator.
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than the
        // current small allocator, use it instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }
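
    // For example, with a 4KB page size (so JIT_ALLOCATOR_LARGE_ALLOC_SIZE
    // is 16KB): a 100-byte request reuses the shared small pool; a 64KB
    // request gets a dedicated pool of its own; and an 8KB request that no
    // longer fits creates a fresh 16KB pool, which also replaces the small
    // pool if it has more space left over than the old one.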

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

#if PLATFORM(X86) || PLATFORM(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif PLATFORM(ARM)
    static void cacheFlush(void* code, size_t size)
    {
#if COMPILER(GCC) && (GCC_VERSION >= 30406)
        __clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(code) + size);
#else
        const int syscall = 0xf0002;
        __asm __volatile (
            "mov    r0, %0\n"
            "mov    r1, %1\n"
            "mov    r7, %2\n"
            "mov    r2, #0x0\n"
            "swi    0x00000000\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size), "r" (syscall)
            : "r0", "r1", "r7");
#endif // COMPILER(GCC) && (GCC_VERSION >= 30406)
    }
#endif
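
    // Typical patching sequence (a sketch, assuming a caller that rewrites
    // previously emitted code at a hypothetical address 'start'):
    //
    //     ExecutableAllocator::makeWritable(start, size);
    //     ... patch the instruction stream ...
    //     ExecutableAllocator::makeExecutable(start, size);
    //     ExecutableAllocator::cacheFlush(start, size);
    //
    // With ASSEMBLER_WX_EXCLUSIVE the make* calls reprotect the region via
    // reprotectRegion(); otherwise pages stay RWX and they are no-ops.
    // cacheFlush is still required on ARM, where the instruction cache is
    // not coherent with data writes.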

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void initializePageSize();
};

inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // The new chunk has more space left over after this allocation than
        // the current bump region, so make it the active region; either way
        // it is tracked in m_pools for release.
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}

}

#endif // ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)