/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSString.h"
#include "JSValue.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>

#if PLATFORM(DARWIN)

#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS)

#include <windows.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#if PLATFORM(SOLARIS)
#include <thread.h>
#endif

#if PLATFORM(OPENBSD)
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#endif

#define DEBUG_COLLECTOR 0
#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
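// How these interact: sweep() keeps up to SPARE_EMPTY_BLOCKS empty blocks
// around before freeing them; the block-pointer array grows by GROWTH_FACTOR
// and is shrunk once fewer than 1/LOW_WATER_FACTOR of its slots are in use;
// and heapAllocate() considers collecting once roughly
// ALLOCATIONS_PER_COLLECTION new cells (or the equivalent extra cost) have
// accumulated since the last collection.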

static void freeHeap(CollectorHeap*);

#if ENABLE(JSC_MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
    DWORD id;
    HANDLE handle;
};
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);

    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    memset(&numberHeap, 0, sizeof(CollectorHeap));
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(false);

    if (!m_globalData)
        return;

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    sweep<PrimaryHeap>();
    // No need to sweep number heap, because the JSNumber destructor doesn't do anything.

    ASSERT(!primaryHeap.numLiveObjects);

    freeHeap(&primaryHeap);
    freeHeap(&numberHeap);

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif

    m_globalData = 0;
}

template <HeapType heapType>
static NEVER_INLINE CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(SYMBIAN)
    // no memory map in symbian, need to hack with fastMalloc
    void* address = fastMalloc(BLOCK_SIZE);
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

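    // mmap makes no alignment promise beyond page granularity, so over-allocate
    // by (BLOCK_SIZE - pagesize) and then munmap the misaligned head and the
    // unused tail. The surviving region is BLOCK_SIZE-aligned, which the rest
    // of the collector relies on to recover a block pointer from any cell
    // pointer by masking off BLOCK_OFFSET_MASK.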
    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif
    reinterpret_cast<CollectorBlock*>(address)->type = heapType;
    return reinterpret_cast<CollectorBlock*>(address);
}

static void freeBlock(CollectorBlock* block)
{
#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(SYMBIAN)
    fastFree(block);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, 0, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}

static void freeHeap(CollectorHeap* heap)
{
    for (size_t i = 0; i < heap->usedBlocks; ++i)
        if (heap->blocks[i])
            freeBlock(heap->blocks[i]);
    fastFree(heap->blocks);
    memset(heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    primaryHeap.extraCost += cost;
}
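
// Illustrative (hypothetical) caller, names invented for the sketch: a cell
// that adopts a large out-of-line buffer would report the buffer's size, so
// the next collection is scheduled as if that many cells had been allocated:
//
//     m_buffer = buffer;                              // large non-GC allocation
//     globalData->heap.recordExtraCost(bufferSize);   // bias the next GC decision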

template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could CRASH() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

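    // Allocation strategy, in outline: starting at firstBlockWithPossibleSpace,
    // look for a block with a free cell. If every block is full, consider
    // collecting -- but only when the allocation count since the last collection
    // is both at least ALLOCATIONS_PER_COLLECTION and at least as large as the
    // surviving population, so a heap that keeps most of its objects alive is
    // not collected over and over. If collection reclaims nothing (or is not
    // worthwhile), grow the block array and append a fresh block instead.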
scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }

        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
        targetBlock->freeList = targetBlock->cells;
        targetBlock->heap = this;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }

    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;
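    // (The inverse encoding appears in sweep(): a freed cell stores
    // freeList - (cell + 1), so a next of 0 always means "the cell immediately
    // after this one" and a freshly memset() block needs no initialization.)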

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}

void* Heap::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}

void* Heap::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}

static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif PLATFORM(UNIX)
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif PLATFORM(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}
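
// Thread registration is lazy, and deregistration is automatic: the pthread
// key created above has unregisterThread as its destructor, so when a
// registered thread exits, pthreads calls unregisterThread with the Heap*
// stored in the key, and the thread's (now dead) stack is never scanned again.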

void Heap::registerThread()
{
    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)

void Heap::markConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

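    // Treat every pointer-sized word in [start, end) as a potential cell
    // pointer: round it down to cell alignment, mask off the low bits to get
    // the address of the block it would live in, and check that address
    // against the list of blocks we actually allocated. Only words that pass
    // both tests (a real block, at an offset no greater than the last cell)
    // are conservatively marked as live.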
    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the number heap; we can mark these cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}

void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal()
{
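    // The address of this local approximates the current stack pointer; the
    // NEVER_INLINE on this function keeps its frame (and therefore 'dummy')
    // below the caller's jmp_buf, so the register values spilled by setjmp
    // fall inside the scanned range.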
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(stackPointer, stackBase);
}

void Heap::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal();
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif PLATFORM(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

void Heap::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside mark() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

void Heap::setGCProtectNeedsLocking()
{
    // Most clients do not need to call this, with the notable exception of WebCore.
    // Clients that use shared heap have JSLock protection, while others are supposed
    // to do explicit locking. WebCore violates this contract in Database code,
    // which calls gcUnprotect from a secondary thread.
    if (!m_protectedValuesMutex)
        m_protectedValuesMutex.set(new Mutex);
}

void Heap::protect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.add(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

void Heap::unprotect(JSValuePtr k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.remove(k.asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}
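
// protect()/unprotect() maintain a counted set (ProtectCountSet), so a value
// protected twice must be unprotected twice before it becomes collectable
// again; clients normally reach these through the gcProtect()/gcUnprotect()
// helpers rather than calling them directly.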

Heap* Heap::heap(JSValuePtr v)
{
    if (!v.isCell())
        return 0;
    return Heap::cellBlock(v.asCell())->heap;
}
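// Note that cellBlock() can find the owning block with pure pointer
// arithmetic (masking off the low bits of the cell address) only because
// allocateBlock() guarantees that every block is BLOCK_SIZE-aligned.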

void Heap::markProtectedObjects()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
}

template <HeapType heapType> size_t Heap::sweep()
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    // SWEEP: delete everything that is unmarked (garbage) and unmark everything else
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;

                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
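            // Mixed block: we only need to examine cells until we have seen
            // all of the block's used cells. Every free cell encountered
            // extends the bound by one, so the loop visits exactly usedCells
            // live cells without always walking the entire block.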
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
#endif
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                heap.usedBlocks--;
                block--; // Don't move forward a step in this case

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
                }
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;
    return numLiveObjects;
}

bool Heap::collect()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        CRASH();

    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    if (m_markListSet && m_markListSet->size())
        ArgList::markLists(*m_markListSet);
    if (m_globalData->exception && !m_globalData->exception.marked())
        m_globalData->exception.mark();
    m_globalData->interpreter->registerFile().markCallFrames(this);
    m_globalData->smallStrings.mark();
    if (m_globalData->scopeNodeBeingReparsed)
        m_globalData->scopeNodeBeingReparsed->mark();

    JAVASCRIPTCORE_GC_MARKED();

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

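    // Report whether anything was reclaimed; heapAllocate() uses this to
    // decide between rescanning for a free cell and growing the heap.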
    return numLiveObjects < originalLiveObjects;
}

size_t Heap::objectCount()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
}

template <HeapType heapType>
static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
{
    typedef HeapConstants<heapType> HC;
    for (size_t i = 0; i < heap.usedBlocks; ++i) {
        if (heap.blocks[i]) {
            statistics.size += BLOCK_SIZE;
            statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
        }
    }
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
    JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return count;
}

size_t Heap::protectedObjectCount()
{
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    size_t result = m_protectedValues.size();

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return result;
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
    if (cell->isNumber())
        return "number";
    if (cell->isGetterSetter())
        return "gettersetter";
    ASSERT(cell->isObject());
    const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

    return counts;
}

bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}

Heap::iterator Heap::primaryHeapBegin()
{
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

Heap::iterator Heap::primaryHeapEnd()
{
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

} // namespace JSC