/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "ExecutableAllocator.h"

#include <errno.h>

#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)

#include "TCSpinLock.h"
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/VMTags.h>

using namespace WTF;

namespace JSC {

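// TWO_GB is the total amount of virtual address space reserved for the pool;
// SIXTEEN_MB is the amount of freed memory that triggers a coalescing sweep of
// the free list (see FixedVMPoolAllocator::coalesceFreeSpace below).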
#define TWO_GB (2u * 1024u * 1024u * 1024u)
#define SIXTEEN_MB (16u * 1024u * 1024u)

// FreeListEntry describes a free chunk of memory, stored in the freeList.
struct FreeListEntry {
    FreeListEntry(void* pointer, size_t size)
        : pointer(pointer)
        , size(size)
        , nextEntry(0)
        , less(0)
        , greater(0)
        , balanceFactor(0)
    {
    }

    // All entries of the same size share a single entry
    // in the AVLTree, and are linked together in a linked
    // list, using nextEntry.
    void* pointer;
    size_t size;
    FreeListEntry* nextEntry;

    // These fields are used by AVLTree.
    FreeListEntry* less;
    FreeListEntry* greater;
    int balanceFactor;
};

// Abstractor class for use in AVLTree.
// Nodes in the AVLTree are of type FreeListEntry, keyed on
// (and thus sorted by) their size.
struct AVLTreeAbstractorForFreeList {
    typedef FreeListEntry* handle;
    typedef int32_t size;
    typedef size_t key;

    handle get_less(handle h) { return h->less; }
    void set_less(handle h, handle lh) { h->less = lh; }
    handle get_greater(handle h) { return h->greater; }
    void set_greater(handle h, handle gh) { h->greater = gh; }
    int get_balance_factor(handle h) { return h->balanceFactor; }
    void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }

    static handle null() { return 0; }

    int compare_key_key(key va, key vb) { return va - vb; }
    int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
    int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
};

// Used to reverse sort an array of FreeListEntry pointers.
static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
{
    FreeListEntry* left = *(FreeListEntry**)leftPtr;
    FreeListEntry* right = *(FreeListEntry**)rightPtr;

    return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
}

// Used to reverse sort an array of pointers.
static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
{
    void* left = *(void**)leftPtr;
    void* right = *(void**)rightPtr;

    return (intptr_t)right - (intptr_t)left;
}

class FixedVMPoolAllocator
{
    // The free list is stored in a sorted tree.
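    // (The second template argument to AVLTree is the maximum depth the tree
    // may grow to.)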
    typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;

    // Use madvise as appropriate to prevent freed pages from being spilled,
    // and to attempt to ensure that used memory is reported correctly.
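    // On Darwin, MADV_FREE_REUSABLE tells the kernel that a range may be
    // discarded and need no longer be counted against the process footprint,
    // while MADV_FREE_REUSE marks the range as in use again before it is next
    // touched.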
#if HAVE(MADV_FREE_REUSE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
#elif HAVE(MADV_DONTNEED)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) {}
#else
    void release(void*, size_t) {}
    void reuse(void*, size_t) {}
#endif

    // All additions to the free list should go through this method, rather than
    // calling insert directly, to avoid multiple entries being added with the
    // same key.  All nodes being added should be singletons; they should not
    // already be part of a chain.
    void addToFreeList(FreeListEntry* entry)
    {
        ASSERT(!entry->nextEntry);

        if (entry->size == m_commonSize) {
            m_commonSizedAllocations.append(entry->pointer);
            delete entry;
        } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
            // m_freeList already contains an entry for this size - insert this node into the chain.
            entry->nextEntry = entryInFreeList->nextEntry;
            entryInFreeList->nextEntry = entry;
        } else
            m_freeList.insert(entry);
    }

    // We do not attempt to coalesce on addition, which may lead to fragmentation;
    // instead we periodically perform a sweep to try to coalesce neighboring
    // entries in m_freeList.  Presently this is triggered at the point that 16MB
    // of memory has been released.
    void coalesceFreeSpace()
    {
        Vector<FreeListEntry*> freeListEntries;
        SizeSortedFreeTree::Iterator iter;
        iter.start_iter_least(m_freeList);

        // Empty m_freeList into a Vector.
        for (FreeListEntry* entry; (entry = *iter); ++iter) {
            // Each entry in m_freeList might correspond to multiple
            // free chunks of memory (of the same size).  Walk the chain
            // (though of course it is likely only one entry long!), adding
            // each entry to the Vector (and resetting the next-in-chain
            // pointer to separate each node out).
            FreeListEntry* next;
            do {
                next = entry->nextEntry;
                entry->nextEntry = 0;
                freeListEntries.append(entry);
            } while ((entry = next));
        }
        // All entries are now in the Vector; purge the tree.
        m_freeList.purge();

        // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
        // We reverse-sort so that we can logically work forwards through memory,
        // whilst popping items off the end of the Vectors using last() and removeLast().
        qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
        qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);

        // The entries from m_commonSizedAllocations that cannot be
        // coalesced into larger chunks will be temporarily stored here.
        Vector<void*> newCommonSizedAllocations;

        // Keep processing so long as entries remain in either of the vectors.
        while (freeListEntries.size() || m_commonSizedAllocations.size()) {
            // We're going to try to find a FreeListEntry node that we can coalesce onto.
            FreeListEntry* coalescionEntry = 0;

            // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
            if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
                // Pop an item from the m_commonSizedAllocations vector - this is the lowest
                // addressed free chunk.  Find out the begin and end addresses of the memory chunk.
                void* begin = m_commonSizedAllocations.last();
                void* end = (void*)((intptr_t)begin + m_commonSize);
                m_commonSizedAllocations.removeLast();

                // Try to find another free chunk abutting onto the end of the one we have already found.
                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // There is an existing FreeListEntry for the next chunk of memory!
                    // We can reuse this.  Pop it off the end of m_freeList.
                    coalescionEntry = freeListEntries.last();
                    freeListEntries.removeLast();
                    // Update the existing node to include the common-sized chunk that we also found.
                    coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
                    coalescionEntry->size += m_commonSize;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // There is a second common-sized chunk that can be coalesced.
                    // Allocate a new node.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
                } else {
                    // Nope - this poor little guy is all on his own. :-(
                    // Add him into the newCommonSizedAllocations vector for now; we're
                    // going to end up adding him back into the m_commonSizedAllocations
                    // list when we're done.
                    newCommonSizedAllocations.append(begin);
                    continue;
                }
            } else {
                ASSERT(freeListEntries.size());
                ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
                // The lowest addressed item is from m_freeList; pop it from the Vector.
                coalescionEntry = freeListEntries.last();
                freeListEntries.removeLast();
            }

            // Right, we have a FreeListEntry; we just need to check if there is anything else
            // to coalesce onto the end.
            ASSERT(coalescionEntry);
            while (true) {
                // Calculate the end address of the chunk we have found so far.
                void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);

                // Is there another chunk adjacent to the one we already have?
                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // Yes - another FreeListEntry - pop it from the list.
                    FreeListEntry* coalescee = freeListEntries.last();
                    freeListEntries.removeLast();
                    // Add its size onto our existing node.
                    coalescionEntry->size += coalescee->size;
                    delete coalescee;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // We can coalesce the next common-sized chunk.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry->size += m_commonSize;
                } else
                    break; // Nope, nothing to be added - stop here.
            }

            // We've coalesced everything we can onto the current chunk.
            // Add it back into m_freeList.
            addToFreeList(coalescionEntry);
        }

        // All chunks of free memory larger than m_commonSize should be
        // back in m_freeList by now.  All that remains to be done is to
        // copy the contents of newCommonSizedAllocations back into
        // the m_commonSizedAllocations Vector.
        ASSERT(m_commonSizedAllocations.size() == 0);
        m_commonSizedAllocations.append(newCommonSizedAllocations);
    }

public:

    FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
        : m_commonSize(commonSize)
        , m_countFreedSinceLastCoalesce(0)
        , m_totalHeapSize(totalHeapSize)
    {
        // Cook up an address to allocate at, using the following recipe:
        //   17 bits of zero, stay in userspace kids.
        //   26 bits of randomness for ASLR.
        //   21 bits of zero, at least stay aligned within one level of the pagetables.
        //
        // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
        // for now instead of 26 bits of ASLR let's stick with 25 bits of randomization plus
        // 2^24, which should put us somewhere in the middle of userspace (in the address range
        // 0x200000000000 .. 0x5fffffffffff).
        intptr_t randomLocation = arc4random() & ((1 << 25) - 1);
        randomLocation += (1 << 24);
        randomLocation <<= 21;
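        // Checking the arithmetic: randomLocation starts out in the range
        // [2^24, 2^25 + 2^24 - 1], so after the shift by 21 the mmap hint lies
        // roughly in [2^45, 2^46 + 2^45], i.e. 0x200000000000 .. 0x5fffffffffff,
        // matching the range quoted above.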
        m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
        if (m_base == MAP_FAILED)
            CRASH();

        // For simplicity, we keep all memory in m_freeList in a 'released' state.
        // This means that we can simply reuse all memory when allocating, without
        // worrying about its previous state, and also makes coalescing m_freeList
        // simpler since we need not worry about the possibility of coalescing released
        // chunks with non-released ones.
        release(m_base, m_totalHeapSize);
        m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
    }

    void* alloc(size_t size)
    {
        void* result;

        // Freed allocations of the common size are not stored back into the main
        // m_freeList, but are instead stored in a separate vector.  If the request
        // is for a common sized allocation, check this list.
        if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
            result = m_commonSizedAllocations.last();
            m_commonSizedAllocations.removeLast();
        } else {
            // Search m_freeList for a suitably sized chunk to allocate memory from.
            FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);

            // This would be bad news.
            if (!entry) {
                // Errk!  Let's take a last-ditch desperation attempt at defragmentation...
                coalesceFreeSpace();
                // Did that free up a large enough chunk?
                entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
                // No?...  *BOOM!*
                if (!entry)
                    CRASH();
            }
            ASSERT(entry->size != m_commonSize);

            // Remove the entry from m_freeList.  But! -
            // Each entry in the tree may represent a chain of multiple chunks of the
            // same size, and we only want to remove one of them.  So, if this entry
            // does have a chain, just remove the first-but-one item from the chain.
            if (FreeListEntry* next = entry->nextEntry) {
                // We're going to leave 'entry' in the tree; remove 'next' from its chain.
                entry->nextEntry = next->nextEntry;
                next->nextEntry = 0;
                entry = next;
            } else
                m_freeList.remove(entry->size);

            // Whoo, we have a result!
            ASSERT(entry->size >= size);
            result = entry->pointer;

            // If the allocation exactly fits the chunk we found in m_freeList,
            // then the FreeListEntry node is no longer needed.
            if (entry->size == size)
                delete entry;
            else {
                // There is memory left over, and it is not of the common size.
                // We can reuse the existing FreeListEntry node to add this back
                // into m_freeList.
                entry->pointer = (void*)((intptr_t)entry->pointer + size);
                entry->size -= size;
                addToFreeList(entry);
            }
        }

        // Call reuse to report to the operating system that this memory is in use.
        ASSERT(isWithinVMPool(result, size));
        reuse(result, size);
        return result;
    }

    void free(void* pointer, size_t size)
    {
        // Call release to report to the operating system that this
        // memory is no longer in use, and need not be paged out.
        ASSERT(isWithinVMPool(pointer, size));
        release(pointer, size);

        // Common-sized allocations are stored in the m_commonSizedAllocations
        // vector; all other freed chunks are added to m_freeList.
        if (size == m_commonSize)
            m_commonSizedAllocations.append(pointer);
        else
            addToFreeList(new FreeListEntry(pointer, size));

        // Do some housekeeping.  Every time we reach the point that
        // 16MB of allocations have been freed, sweep m_freeList,
        // coalescing any neighboring fragments.
        m_countFreedSinceLastCoalesce += size;
        if (m_countFreedSinceLastCoalesce >= SIXTEEN_MB) {
            m_countFreedSinceLastCoalesce = 0;
            coalesceFreeSpace();
        }
    }

private:

#ifndef NDEBUG
    bool isWithinVMPool(void* pointer, size_t size)
    {
        return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
    }
#endif

    // Freed space from the most common sized allocations will be held in this list, ...
    const size_t m_commonSize;
    Vector<void*> m_commonSizedAllocations;

    // ... and all other freed allocations are held in m_freeList.
    SizeSortedFreeTree m_freeList;

    // This is used for housekeeping, to trigger defragmentation of the freed lists.
    size_t m_countFreedSinceLastCoalesce;

    void* m_base;
    size_t m_totalHeapSize;
};

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

static FixedVMPoolAllocator* allocator = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;

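// The pool allocator is created lazily on first use, under the spinlock.  It
// reserves TWO_GB of address space up front, and treats allocations of
// JIT_ALLOCATOR_LARGE_ALLOC_SIZE as the 'common size' fast path described above.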
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);

    if (!allocator)
        allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, TWO_GB);
    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
    return alloc;
}

void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    allocator->free(allocation.pages, allocation.size);
}

}

#endif // ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)