/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PageAllocation.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IOS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif

#if CPU(SH4) && OS(LINUX)
#include <asm/cachectl.h>
#include <asm/unistd.h>
#include <sys/syscall.h>
#include <unistd.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#if PLATFORM(BREWMP)
#include <AEEIMemCache1.h>
#include <AEEMemCache1.bid>
#include <wtf/brew/RefPtrBrew.h>
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define EXECUTABLE_POOL_WRITABLE false
#else
#define EXECUTABLE_POOL_WRITABLE true
#endif
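
// With ASSEMBLER_WX_EXCLUSIVE, pool memory is never writable and executable
// at the same time: pools start out non-writable (EXECUTABLE_POOL_WRITABLE is
// false) and regions are flipped between the two protection states with
// makeWritable()/makeExecutable() below.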

namespace JSC {

inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of granularity, which must be a power of two.
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}
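
// Illustrative examples (assuming sizeof(void*) == 8, as on a 64-bit target):
//
//     roundUpAllocationSize(10, 8); // == 16
//     roundUpAllocationSize(16, 8); // == 16 (already aligned)
//     roundUpAllocationSize(17, 8); // == 24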

}

#if ENABLE(JIT) && ENABLE(ASSEMBLER)

namespace JSC {

class ExecutablePool : public RefCounted<ExecutablePool> {
public:
#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
    typedef PageAllocation Allocation;
#else
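    // Without the demand allocator, an Allocation is just a plain
    // (base, size) pair; systemAlloc()/systemRelease() below manage
    // its lifetime.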
    class Allocation {
    public:
        Allocation(void* base, size_t size)
            : m_base(base)
            , m_size(size)
        {
        }
        void* base() { return m_base; }
        size_t size() { return m_size; }
        bool operator!() const { return !m_base; }

    private:
        void* m_base;
        size_t m_size;
    };
#endif
    typedef Vector<Allocation, 2> AllocationList;

    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of the word size; if all allocations are
        // word-sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // There is insufficient space in the existing pool,
        // so we need to allocate from a new pool.
        return poolAllocate(n);
    }

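    // Shrinking is only attempted for the most recent allocation: unless
    // 'allocation' + 'oldSize' is exactly the current free pointer, the
    // request is ignored and the space is simply not reclaimed.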
    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
    {
        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
            return;
        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
    }

    ~ExecutablePool()
    {
        AllocationList::iterator end = m_pools.end();
        for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};

class ExecutableAllocator {
    enum ProtectionSetting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            intializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }

    bool isValid() const;

    static bool underMemoryPressure();

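    // Returns a pool able to satisfy an allocation of n bytes: the shared
    // small-allocation pool when it has room, a dedicated pool for requests
    // larger than JIT_ALLOCATOR_LARGE_ALLOC_SIZE, and a fresh shared pool
    // otherwise.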
    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator.
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, just provide an unshared allocator.
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator.
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than the current
        // small allocator, use it for future small allocations instead.
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

#if CPU(X86) || CPU(X86_64)
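    // No-op: x86 keeps instruction fetch coherent with data writes, so no
    // explicit cache flush is needed after emitting code.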
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if GCC_VERSION_AT_LEAST(4, 3, 0)
#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one extra time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IOS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
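    // Invokes the Linux ARM-private cacheflush syscall directly: r0/r1 hold
    // the start/end of the range, r2 must be zero, and r7 carries the syscall
    // number 0x0f0002 (__ARM_NR_cacheflush), built here with movw/movt. r7 is
    // saved and restored because it can serve as the Thumb frame pointer.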
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "movw r7, #0x2\n"
            "movt r7, #0xf\n"
            "movs r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
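    // Same __ARM_NR_cacheflush syscall as the Thumb-2 variant above; the
    // 0x0f0002 syscall number is built with mov/add because classic ARM
    // encodings lack movw/movt.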
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "mov r7, #0xf0000\n"
            "add r7, r7, #0x2\n"
            "mov r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#elif PLATFORM(BREWMP)
    static void cacheFlush(void* code, size_t size)
    {
        RefPtr<IMemCache1> memCache = createRefPtrInstance<IMemCache1>(AEECLSID_MemCache1);
        IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_FLUSH, MEMSPACE_DATACACHE);
        IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_INVALIDATE, MEMSPACE_INSTCACHE);
    }
#elif CPU(SH4) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
#ifdef CACHEFLUSH_D_L2
        syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
#else
        syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
#endif
    }
#else
#error "cacheFlush support is missing on this platform."
#endif

    static size_t committedByteCount();

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSetting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void intializePageSize();
};

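// Illustrative usage sketch (hypothetical client code, not part of this
// header): a JIT client would typically obtain a pool, copy generated code
// into it, then flip the region to executable and flush the instruction
// cache before running it. 'generatedCode' and 'codeSize' are placeholders.
//
//     ExecutableAllocator allocator;
//     RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
//     void* code = pool->alloc(codeSize);
//     ExecutableAllocator::makeWritable(code, codeSize);
//     memcpy(code, generatedCode, codeSize);
//     ExecutableAllocator::makeExecutable(code, codeSize);
//     ExecutableAllocator::cacheFlush(code, codeSize);
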
inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = static_cast<char*>(mem.base());
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.base())
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // Make the new pool the active bump-allocation region, since it
        // leaves more free space behind than the current one.
        m_freePtr = static_cast<char*>(result.base()) + n;
        m_end = static_cast<char*>(result.base()) + allocSize;
    }

    m_pools.append(result);
    return result.base();
}

}

#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)

#endif // !defined(ExecutableAllocator_h)