1 /*
2  *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20 
21 #include "config.h"
22 #include "Collector.h"
23 
24 #include "ArgList.h"
25 #include "CallFrame.h"
26 #include "CodeBlock.h"
27 #include "CollectorHeapIterator.h"
28 #include "Interpreter.h"
29 #include "JSArray.h"
30 #include "JSGlobalObject.h"
31 #include "JSLock.h"
32 #include "JSONObject.h"
33 #include "JSString.h"
34 #include "JSValue.h"
35 #include "JSZombie.h"
36 #include "MarkStack.h"
37 #include "Nodes.h"
38 #include "Tracing.h"
39 #include <algorithm>
40 #include <limits.h>
41 #include <setjmp.h>
42 #include <stdlib.h>
43 #include <wtf/FastMalloc.h>
44 #include <wtf/HashCountedSet.h>
45 #include <wtf/UnusedParam.h>
46 #include <wtf/VMTags.h>
47 
48 #if OS(DARWIN)
49 
50 #include <mach/mach_init.h>
51 #include <mach/mach_port.h>
52 #include <mach/task.h>
53 #include <mach/thread_act.h>
54 #include <mach/vm_map.h>
55 
56 #elif OS(SYMBIAN)
57 #include <e32std.h>
58 #include <e32cmn.h>
59 #include <unistd.h>
60 
61 #elif OS(WINDOWS)
62 
63 #include <windows.h>
64 #include <malloc.h>
65 
66 #elif OS(HAIKU)
67 
68 #include <OS.h>
69 
70 #elif OS(UNIX)
71 
72 #include <stdlib.h>
73 #if !OS(HAIKU)
74 #include <sys/mman.h>
75 #endif
76 #include <unistd.h>
77 
78 #if OS(SOLARIS)
79 #include <thread.h>
80 #else
81 #include <pthread.h>
82 #endif
83 
84 #if HAVE(PTHREAD_NP_H)
85 #include <pthread_np.h>
86 #endif
87 
88 #if OS(QNX)
89 #include <fcntl.h>
90 #include <sys/procfs.h>
91 #include <stdio.h>
92 #include <errno.h>
93 #endif
94 
95 #endif
96 
97 #define COLLECT_ON_EVERY_ALLOCATION 0
98 
99 using std::max;
100 
101 namespace JSC {
102 
103 // tunable parameters
104 
105 const size_t GROWTH_FACTOR = 2;
106 const size_t LOW_WATER_FACTOR = 4;
107 const size_t ALLOCATIONS_PER_COLLECTION = 3600;
108 // This value has to be a macro to be used in max() without introducing
109 // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
110 #define MIN_ARRAY_SIZE (static_cast<size_t>(14))
111 
112 #if OS(SYMBIAN)
113 const size_t MAX_NUM_BLOCKS = 256; // Max size of collector heap set to 16 MB
114 static RHeap* userChunk = 0;
115 #endif
116 
117 #if ENABLE(JSC_MULTIPLE_THREADS)
118 
119 #if OS(DARWIN)
120 typedef mach_port_t PlatformThread;
121 #elif OS(WINDOWS)
122 typedef HANDLE PlatformThread;
123 #endif
124 
125 class Heap::Thread {
126 public:
127     Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
128         : posixThread(pthread)
129         , platformThread(platThread)
130         , stackBase(base)
131     {
132     }
133 
134     Thread* next;
135     pthread_t posixThread;
136     PlatformThread platformThread;
137     void* stackBase;
138 };
139 
140 #endif
141 
142 Heap::Heap(JSGlobalData* globalData)
143     : m_markListSet(0)
144 #if ENABLE(JSC_MULTIPLE_THREADS)
145     , m_registeredThreads(0)
146     , m_currentThreadRegistrar(0)
147 #endif
148     , m_globalData(globalData)
149 {
150     ASSERT(globalData);
151 
152 #if OS(SYMBIAN)
153     // Symbian OpenC supports mmap but currently not the MAP_ANON flag.
154     // Using fastMalloc() does not properly align blocks on 64k boundaries
155     // and the previous implementation was flawed/incomplete.
156     // UserHeap::ChunkHeap allows allocation of contiguous memory and specification
157     // of an alignment value for (Symbian) cells within that heap.
158     //
159     // Clarification and mapping of terminology:
160     // RHeap (created by UserHeap::ChunkHeap below) is a contiguous memory chunk,
161     // which can dynamically grow up to 8 MB,
162     // that holds all CollectorBlocks of this session (static).
163     // Each symbian cell within RHeap maps to a 64kb aligned CollectorBlock.
164     // JSCell objects are maintained as usual within CollectorBlocks.
165     if (!userChunk) {
166         userChunk = UserHeap::ChunkHeap(0, 0, MAX_NUM_BLOCKS * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
167         if (!userChunk)
168             CRASH();
169     }
170 #endif // OS(SYMBIAN)
171 
172     memset(&m_heap, 0, sizeof(CollectorHeap));
173     allocateBlock();
174 }
175 
176 Heap::~Heap()
177 {
178     // The destroy function must already have been called, so assert this.
179     ASSERT(!m_globalData);
180 }
181 
182 void Heap::destroy()
183 {
184     JSLock lock(SilenceAssertionsOnly);
185 
186     if (!m_globalData)
187         return;
188 
189     ASSERT(!m_globalData->dynamicGlobalObject);
190     ASSERT(!isBusy());
191 
192     // The global object is not GC protected at this point, so sweeping may delete it
193     // (and thus the global data) before other objects that may use the global data.
194     RefPtr<JSGlobalData> protect(m_globalData);
195 
196     delete m_markListSet;
197     m_markListSet = 0;
198 
199     freeBlocks();
200 
201 #if ENABLE(JSC_MULTIPLE_THREADS)
202     if (m_currentThreadRegistrar) {
203         int error = pthread_key_delete(m_currentThreadRegistrar);
204         ASSERT_UNUSED(error, !error);
205     }
206 
207     MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
208     for (Heap::Thread* t = m_registeredThreads; t;) {
209         Heap::Thread* next = t->next;
210         delete t;
211         t = next;
212     }
213 #endif
214 
215     m_globalData = 0;
216 }
217 
218 NEVER_INLINE CollectorBlock* Heap::allocateBlock()
219 {
220 #if OS(DARWIN)
221     vm_address_t address = 0;
222     vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
223 #elif OS(SYMBIAN)
224     // Allocate a 64 kb aligned CollectorBlock
225     unsigned char* mask = reinterpret_cast<unsigned char*>(userChunk->Alloc(BLOCK_SIZE));
226     if (!mask)
227         CRASH();
228     uintptr_t address = reinterpret_cast<uintptr_t>(mask);
229 #elif OS(WINCE)
230     void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
231 #elif OS(WINDOWS)
232 #if COMPILER(MINGW)
233     void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
234 #else
235     void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
236 #endif
237     memset(address, 0, BLOCK_SIZE);
238 #elif HAVE(POSIX_MEMALIGN)
239     void* address;
240     posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
241 #else
242 
243 #if ENABLE(JSC_MULTIPLE_THREADS)
244 #error Need to initialize pagesize safely.
245 #endif
246     static size_t pagesize = getpagesize();
247 
248     size_t extra = 0;
249     if (BLOCK_SIZE > pagesize)
250         extra = BLOCK_SIZE - pagesize;
251 
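    // mmap only guarantees page alignment, so over-allocate by |extra| bytes and trim
    // the misaligned head and unused tail below to get a BLOCK_SIZE-aligned block.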
252     void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
253     uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
254 
255     size_t adjust = 0;
256     if ((address & BLOCK_OFFSET_MASK) != 0)
257         adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
258 
259     if (adjust > 0)
260         munmap(reinterpret_cast<char*>(address), adjust);
261 
262     if (adjust < extra)
263         munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
264 
265     address += adjust;
266 #endif
267 
268     // Initialize block.
269 
270     CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
271     block->heap = this;
272     clearMarkBits(block);
273 
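    // Seed every cell with a dummy markable cell so that conservative marking and
    // sweeping can safely visit cells that have not yet been handed out by allocate().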
274     Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
275     for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
276         new (block->cells + i) JSCell(dummyMarkableCellStructure);
277 
278     // Add block to blocks vector.
279 
280     size_t numBlocks = m_heap.numBlocks;
281     if (m_heap.usedBlocks == numBlocks) {
282         static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
283         if (numBlocks > maxNumBlocks)
284             CRASH();
285         numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
286         m_heap.numBlocks = numBlocks;
287         m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
288     }
289     m_heap.blocks[m_heap.usedBlocks++] = block;
290 
291     return block;
292 }
293 
294 NEVER_INLINE void Heap::freeBlock(size_t block)
295 {
296     m_heap.didShrink = true;
297 
298     ObjectIterator it(m_heap, block);
299     ObjectIterator end(m_heap, block + 1);
300     for ( ; it != end; ++it)
301         (*it)->~JSCell();
302     freeBlockPtr(m_heap.blocks[block]);
303 
304     // swap with the last block so we compact as we go
305     m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
306     m_heap.usedBlocks--;
307 
308     if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
309         m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
310         m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
311     }
312 }
313 
314 NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
315 {
316 #if OS(DARWIN)
317     vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
318 #elif OS(SYMBIAN)
319     userChunk->Free(reinterpret_cast<TAny*>(block));
320 #elif OS(WINCE)
321     VirtualFree(block, 0, MEM_RELEASE);
322 #elif OS(WINDOWS)
323 #if COMPILER(MINGW)
324     __mingw_aligned_free(block);
325 #else
326     _aligned_free(block);
327 #endif
328 #elif HAVE(POSIX_MEMALIGN)
329     free(block);
330 #else
331     munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
332 #endif
333 }
334 
335 void Heap::freeBlocks()
336 {
337     ProtectCountSet protectedValuesCopy = m_protectedValues;
338 
339     clearMarkBits();
340     ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
341     for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
342         markCell(it->first);
343 
344     m_heap.nextCell = 0;
345     m_heap.nextBlock = 0;
346     DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
347     DeadObjectIterator end(m_heap, m_heap.usedBlocks);
348     for ( ; it != end; ++it)
349         (*it)->~JSCell();
350 
351     ASSERT(!protectedObjectCount());
352 
353     protectedValuesEnd = protectedValuesCopy.end();
354     for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
355         it->first->~JSCell();
356 
357     for (size_t block = 0; block < m_heap.usedBlocks; ++block)
358         freeBlockPtr(m_heap.blocks[block]);
359 
360     fastFree(m_heap.blocks);
361 
362     memset(&m_heap, 0, sizeof(CollectorHeap));
363 }
364 
365 void Heap::recordExtraCost(size_t cost)
366 {
367     // Our frequency of garbage collection tries to balance memory use against speed
368     // by collecting based on the number of newly created values. However, for values
369     // that hold on to a great deal of memory that's not in the form of other JS values,
370     // that is not good enough - in some cases a lot of those objects can pile up and
371     // use crazy amounts of memory without a GC happening. So we track these extra
372     // memory costs. Only unusually large objects are noted, and we only keep track
373     // of this extra cost until the next GC. In garbage collected languages, most values
374     // are either very short lived temporaries, or have extremely long lifetimes. So
375     // if a large value survives one garbage collection, there is not much point to
376     // collecting more frequently as long as it stays alive.
377 
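    // Collect once the reported extra cost exceeds both the fixed threshold and half
    // the memory currently held by collector blocks.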
378     if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
379         // If the last iteration through the heap deallocated blocks, we need
380         // to clean up remaining garbage before marking. Otherwise, the conservative
381         // marking mechanism might follow a pointer to unmapped memory.
382         if (m_heap.didShrink)
383             sweep();
384         reset();
385     }
386     m_heap.extraCost += cost;
387 }
388 
389 void* Heap::allocate(size_t s)
390 {
391     typedef HeapConstants::Block Block;
392     typedef HeapConstants::Cell Cell;
393 
394     ASSERT(JSLock::lockCount() > 0);
395     ASSERT(JSLock::currentThreadIsHoldingLock());
396     ASSERT_UNUSED(s, s <= HeapConstants::cellSize);
397 
398     ASSERT(m_heap.operationInProgress == NoOperation);
399 
400 #if COLLECT_ON_EVERY_ALLOCATION
401     collectAllGarbage();
402     ASSERT(m_heap.operationInProgress == NoOperation);
403 #endif
404 
405 allocate:
406 
407     // Fast case: find the next garbage cell and recycle it.
408 
409     do {
410         ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
411         Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
412         do {
413             ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
414             if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
415                 Cell* cell = block->cells + m_heap.nextCell;
416 
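                // Lazy sweep: this cell was not marked by the last collection, so it is
                // garbage; run its destructor now, just before recycling its storage.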
417                 m_heap.operationInProgress = Allocation;
418                 JSCell* imp = reinterpret_cast<JSCell*>(cell);
419                 imp->~JSCell();
420                 m_heap.operationInProgress = NoOperation;
421 
422                 ++m_heap.nextCell;
423                 return cell;
424             }
425         } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
426         m_heap.nextCell = 0;
427     } while (++m_heap.nextBlock != m_heap.usedBlocks);
428 
429     // Slow case: reached the end of the heap. Mark live objects and start over.
430 
431     reset();
432     goto allocate;
433 }
434 
435 void Heap::resizeBlocks()
436 {
437     m_heap.didShrink = false;
438 
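    // Size the heap for the surviving cells plus at least another collection's worth
    // of allocations, but keep no more than about 25% slack beyond that minimum.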
439     size_t usedCellCount = markedCells();
440     size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
441     size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;
442 
443     size_t maxCellCount = 1.25f * minCellCount;
444     size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;
445 
446     if (m_heap.usedBlocks < minBlockCount)
447         growBlocks(minBlockCount);
448     else if (m_heap.usedBlocks > maxBlockCount)
449         shrinkBlocks(maxBlockCount);
450 }
451 
452 void Heap::growBlocks(size_t neededBlocks)
453 {
454     ASSERT(m_heap.usedBlocks < neededBlocks);
455     while (m_heap.usedBlocks < neededBlocks)
456         allocateBlock();
457 }
458 
459 void Heap::shrinkBlocks(size_t neededBlocks)
460 {
461     ASSERT(m_heap.usedBlocks > neededBlocks);
462 
463     // Clear the always-on last bit, so isEmpty() isn't fooled by it.
464     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
465         m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);
466 
467     for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
468         if (m_heap.blocks[i]->marked.isEmpty()) {
469             freeBlock(i);
470         } else
471             ++i;
472     }
473 
474     // Reset the always-on last bit.
475     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
476         m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
477 }
478 
479 #if OS(WINCE)
480 void* g_stackBase = 0;
481 
482 inline bool isPageWritable(void* page)
483 {
484     MEMORY_BASIC_INFORMATION memoryInformation;
485     DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));
486 
487     // return false on error, including ptr outside memory
488     if (result != sizeof(memoryInformation))
489         return false;
490 
491     DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
492     return protect == PAGE_READWRITE
493         || protect == PAGE_WRITECOPY
494         || protect == PAGE_EXECUTE_READWRITE
495         || protect == PAGE_EXECUTE_WRITECOPY;
496 }
497 
498 static void* getStackBase(void* previousFrame)
499 {
500     // find the address of this stack frame by taking the address of a local variable
501     bool isGrowingDownward;
502     void* thisFrame = (void*)(&isGrowingDownward);
503 
504     isGrowingDownward = previousFrame < &thisFrame;
505     static DWORD pageSize = 0;
506     if (!pageSize) {
507         SYSTEM_INFO systemInfo;
508         GetSystemInfo(&systemInfo);
509         pageSize = systemInfo.dwPageSize;
510     }
511 
512     // scan all of memory starting from this frame, and return the last writeable page found
513     register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
514     if (isGrowingDownward) {
515         while (currentPage > 0) {
516             // check for underflow
517             if (currentPage >= (char*)pageSize)
518                 currentPage -= pageSize;
519             else
520                 currentPage = 0;
521             if (!isPageWritable(currentPage))
522                 return currentPage + pageSize;
523         }
524         return 0;
525     } else {
526         while (true) {
527             // guaranteed to complete because isPageWritable returns false at end of memory
528             currentPage += pageSize;
529             if (!isPageWritable(currentPage))
530                 return currentPage;
531         }
532     }
533 }
534 #endif
535 
536 #if OS(QNX)
537 static inline void *currentThreadStackBaseQNX()
538 {
539     static void* stackBase = 0;
540     static size_t stackSize = 0;
541     static pthread_t stackThread;
542     pthread_t thread = pthread_self();
543     if (stackBase == 0 || thread != stackThread) {
544         struct _debug_thread_info threadInfo;
545         memset(&threadInfo, 0, sizeof(threadInfo));
546         threadInfo.tid = pthread_self();
547         int fd = open("/proc/self", O_RDONLY);
548         if (fd == -1) {
549             LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
550             return 0;
551         }
552         devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
553         close(fd);
554         stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
555         stackSize = threadInfo.stksize;
556         ASSERT(stackBase);
557         stackThread = thread;
558     }
559     return static_cast<char*>(stackBase) + stackSize;
560 }
561 #endif
562 
563 static inline void* currentThreadStackBase()
564 {
565 #if OS(DARWIN)
566     pthread_t thread = pthread_self();
567     return pthread_get_stackaddr_np(thread);
568 #elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
569     // offset 0x18 from the FS segment register gives a pointer to
570     // the thread information block for the current thread
571     NT_TIB* pTib;
572     __asm {
573         MOV EAX, FS:[18h]
574         MOV pTib, EAX
575     }
576     return static_cast<void*>(pTib->StackBase);
577 #elif OS(WINDOWS) && CPU(X86_64) && COMPILER(MSVC)
578     // FIXME: why only for MSVC?
579     PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
580     return reinterpret_cast<void*>(pTib->StackBase);
581 #elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
582     // offset 0x18 from the FS segment register gives a pointer to
583     // the thread information block for the current thread
584     NT_TIB* pTib;
585     asm ( "movl %%fs:0x18, %0\n"
586           : "=r" (pTib)
587         );
588     return static_cast<void*>(pTib->StackBase);
589 #elif OS(QNX)
590     return currentThreadStackBaseQNX();
591 #elif OS(SOLARIS)
592     stack_t s;
593     thr_stksegment(&s);
594     return s.ss_sp;
595 #elif OS(OPENBSD)
596     pthread_t thread = pthread_self();
597     stack_t stack;
598     pthread_stackseg_np(thread, &stack);
599     return stack.ss_sp;
600 #elif OS(SYMBIAN)
601     static void* stackBase = 0;
602     if (stackBase == 0) {
603         TThreadStackInfo info;
604         RThread thread;
605         thread.StackInfo(info);
606         stackBase = (void*)info.iBase;
607     }
608     return (void*)stackBase;
609 #elif OS(HAIKU)
610     thread_info threadInfo;
611     get_thread_info(find_thread(NULL), &threadInfo);
612     return threadInfo.stack_end;
613 #elif OS(UNIX)
614     static void* stackBase = 0;
615     static size_t stackSize = 0;
616     static pthread_t stackThread;
617     pthread_t thread = pthread_self();
618     if (stackBase == 0 || thread != stackThread) {
619         pthread_attr_t sattr;
620         pthread_attr_init(&sattr);
621 #if HAVE(PTHREAD_NP_H) || OS(NETBSD)
622         // e.g. on FreeBSD 5.4, neundorf@kde.org
623         pthread_attr_get_np(thread, &sattr);
624 #else
625         // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
626         pthread_getattr_np(thread, &sattr);
627 #endif
628         int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
629         (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
630         ASSERT(stackBase);
631         pthread_attr_destroy(&sattr);
632         stackThread = thread;
633     }
634     return static_cast<char*>(stackBase) + stackSize;
635 #elif OS(WINCE)
636     if (g_stackBase)
637         return g_stackBase;
638     else {
639         int dummy;
640         return getStackBase(&dummy);
641     }
642 #else
643 #error Need a way to get the stack base on this platform
644 #endif
645 }
646 
647 #if ENABLE(JSC_MULTIPLE_THREADS)
648 
649 static inline PlatformThread getCurrentPlatformThread()
650 {
651 #if OS(DARWIN)
652     return pthread_mach_thread_np(pthread_self());
653 #elif OS(WINDOWS)
654     return pthread_getw32threadhandle_np(pthread_self());
655 #endif
656 }
657 
658 void Heap::makeUsableFromMultipleThreads()
659 {
660     if (m_currentThreadRegistrar)
661         return;
662 
663     int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
664     if (error)
665         CRASH();
666 }
667 
668 void Heap::registerThread()
669 {
670     ASSERT(!m_globalData->mainThreadOnly || isMainThread());
671 
672     if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
673         return;
674 
675     pthread_setspecific(m_currentThreadRegistrar, this);
676     Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());
677 
678     MutexLocker lock(m_registeredThreadsMutex);
679 
680     thread->next = m_registeredThreads;
681     m_registeredThreads = thread;
682 }
683 
684 void Heap::unregisterThread(void* p)
685 {
686     if (p)
687         static_cast<Heap*>(p)->unregisterThread();
688 }
689 
690 void Heap::unregisterThread()
691 {
692     pthread_t currentPosixThread = pthread_self();
693 
694     MutexLocker lock(m_registeredThreadsMutex);
695 
696     if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
697         Thread* t = m_registeredThreads;
698         m_registeredThreads = m_registeredThreads->next;
699         delete t;
700     } else {
701         Heap::Thread* last = m_registeredThreads;
702         Heap::Thread* t;
703         for (t = m_registeredThreads->next; t; t = t->next) {
704             if (pthread_equal(t->posixThread, currentPosixThread)) {
705                 last->next = t->next;
706                 break;
707             }
708             last = t;
709         }
710         ASSERT(t); // If t is NULL, we never found ourselves in the list.
711         delete t;
712     }
713 }
714 
715 #else // ENABLE(JSC_MULTIPLE_THREADS)
716 
717 void Heap::registerThread()
718 {
719 }
720 
721 #endif
722 
723 inline bool isPointerAligned(void* p)
724 {
725     return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
726 }
727 
728 // Cell size needs to be a power of two for isPossibleCell to be valid.
729 COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);
730 
731 #if USE(JSVALUE32)
732 static bool isHalfCellAligned(void *p)
733 {
734     return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
735 }
736 
737 static inline bool isPossibleCell(void* p)
738 {
739     return isHalfCellAligned(p) && p;
740 }
741 
742 #else
743 
744 static inline bool isCellAligned(void *p)
745 {
746     return (((intptr_t)(p) & CELL_MASK) == 0);
747 }
748 
749 static inline bool isPossibleCell(void* p)
750 {
751     return isCellAligned(p) && p;
752 }
753 #endif // USE(JSVALUE32)
754 
755 void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
756 {
757     if (start > end) {
758         void* tmp = start;
759         start = end;
760         end = tmp;
761     }
762 
763     ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
764     ASSERT(isPointerAligned(start));
765     ASSERT(isPointerAligned(end));
766 
767     char** p = static_cast<char**>(start);
768     char** e = static_cast<char**>(end);
769 
770     CollectorBlock** blocks = m_heap.blocks;
771     while (p != e) {
772         char* x = *p++;
773         if (isPossibleCell(x)) {
774             size_t usedBlocks;
775             uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
776             xAsBits &= CELL_ALIGN_MASK;
777 
778             uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
779             const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
780             if (offset > lastCellOffset)
781                 continue;
782 
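            // Only mark the candidate if the block it would belong to is one this heap
            // actually owns; any other bit pattern is a false positive.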
783             CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
784             usedBlocks = m_heap.usedBlocks;
785             for (size_t block = 0; block < usedBlocks; block++) {
786                 if (blocks[block] != blockAddr)
787                     continue;
788                 markStack.append(reinterpret_cast<JSCell*>(xAsBits));
789                 markStack.drain();
790             }
791         }
792     }
793 }
794 
795 void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
796 {
797     void* dummy;
798     void* stackPointer = &dummy;
799     void* stackBase = currentThreadStackBase();
800     markConservatively(markStack, stackPointer, stackBase);
801 }
802 
803 #if COMPILER(GCC)
804 #define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
805 #else
806 #define REGISTER_BUFFER_ALIGNMENT
807 #endif
808 
809 void Heap::markCurrentThreadConservatively(MarkStack& markStack)
810 {
811     // setjmp forces volatile registers onto the stack
812     jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
813 #if COMPILER(MSVC)
814 #pragma warning(push)
815 #pragma warning(disable: 4611)
816 #endif
817     setjmp(registers);
818 #if COMPILER(MSVC)
819 #pragma warning(pop)
820 #endif
821 
822     markCurrentThreadConservativelyInternal(markStack);
823 }
824 
825 #if ENABLE(JSC_MULTIPLE_THREADS)
826 
827 static inline void suspendThread(const PlatformThread& platformThread)
828 {
829 #if OS(DARWIN)
830     thread_suspend(platformThread);
831 #elif OS(WINDOWS)
832     SuspendThread(platformThread);
833 #else
834 #error Need a way to suspend threads on this platform
835 #endif
836 }
837 
838 static inline void resumeThread(const PlatformThread& platformThread)
839 {
840 #if OS(DARWIN)
841     thread_resume(platformThread);
842 #elif OS(WINDOWS)
843     ResumeThread(platformThread);
844 #else
845 #error Need a way to resume threads on this platform
846 #endif
847 }
848 
849 typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
850 
851 #if OS(DARWIN)
852 
853 #if CPU(X86)
854 typedef i386_thread_state_t PlatformThreadRegisters;
855 #elif CPU(X86_64)
856 typedef x86_thread_state64_t PlatformThreadRegisters;
857 #elif CPU(PPC)
858 typedef ppc_thread_state_t PlatformThreadRegisters;
859 #elif CPU(PPC64)
860 typedef ppc_thread_state64_t PlatformThreadRegisters;
861 #elif CPU(ARM)
862 typedef arm_thread_state_t PlatformThreadRegisters;
863 #else
864 #error Unknown Architecture
865 #endif
866 
867 #elif OS(WINDOWS) && CPU(X86)
868 typedef CONTEXT PlatformThreadRegisters;
869 #else
870 #error Need a thread register struct for this platform
871 #endif
872 
873 static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
874 {
875 #if OS(DARWIN)
876 
877 #if CPU(X86)
878     unsigned user_count = sizeof(regs)/sizeof(int);
879     thread_state_flavor_t flavor = i386_THREAD_STATE;
880 #elif CPU(X86_64)
881     unsigned user_count = x86_THREAD_STATE64_COUNT;
882     thread_state_flavor_t flavor = x86_THREAD_STATE64;
883 #elif CPU(PPC)
884     unsigned user_count = PPC_THREAD_STATE_COUNT;
885     thread_state_flavor_t flavor = PPC_THREAD_STATE;
886 #elif CPU(PPC64)
887     unsigned user_count = PPC_THREAD_STATE64_COUNT;
888     thread_state_flavor_t flavor = PPC_THREAD_STATE64;
889 #elif CPU(ARM)
890     unsigned user_count = ARM_THREAD_STATE_COUNT;
891     thread_state_flavor_t flavor = ARM_THREAD_STATE;
892 #else
893 #error Unknown Architecture
894 #endif
895 
896     kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
897     if (result != KERN_SUCCESS) {
898         WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
899                             "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
900         CRASH();
901     }
902     return user_count * sizeof(usword_t);
903 // end OS(DARWIN)
904 
905 #elif OS(WINDOWS) && CPU(X86)
906     regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
907     GetThreadContext(platformThread, &regs);
908     return sizeof(CONTEXT);
909 #else
910 #error Need a way to get thread registers on this platform
911 #endif
912 }
913 
914 static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
915 {
916 #if OS(DARWIN)
917 
918 #if __DARWIN_UNIX03
919 
920 #if CPU(X86)
921     return reinterpret_cast<void*>(regs.__esp);
922 #elif CPU(X86_64)
923     return reinterpret_cast<void*>(regs.__rsp);
924 #elif CPU(PPC) || CPU(PPC64)
925     return reinterpret_cast<void*>(regs.__r1);
926 #elif CPU(ARM)
927     return reinterpret_cast<void*>(regs.__sp);
928 #else
929 #error Unknown Architecture
930 #endif
931 
932 #else // !__DARWIN_UNIX03
933 
934 #if CPU(X86)
935     return reinterpret_cast<void*>(regs.esp);
936 #elif CPU(X86_64)
937     return reinterpret_cast<void*>(regs.rsp);
938 #elif CPU(PPC) || CPU(PPC64)
939     return reinterpret_cast<void*>(regs.r1);
940 #else
941 #error Unknown Architecture
942 #endif
943 
944 #endif // __DARWIN_UNIX03
945 
946 // end OS(DARWIN)
947 #elif CPU(X86) && OS(WINDOWS)
948     return reinterpret_cast<void*>((uintptr_t) regs.Esp);
949 #else
950 #error Need a way to get the stack pointer for another thread on this platform
951 #endif
952 }
953 
954 void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
955 {
956     suspendThread(thread->platformThread);
957 
958     PlatformThreadRegisters regs;
959     size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
960 
961     // mark the thread's registers
962     markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
963 
964     void* stackPointer = otherThreadStackPointer(regs);
965     markConservatively(markStack, stackPointer, thread->stackBase);
966 
967     resumeThread(thread->platformThread);
968 }
969 
970 #endif
971 
972 void Heap::markStackObjectsConservatively(MarkStack& markStack)
973 {
974     markCurrentThreadConservatively(markStack);
975 
976 #if ENABLE(JSC_MULTIPLE_THREADS)
977 
978     if (m_currentThreadRegistrar) {
979 
980         MutexLocker lock(m_registeredThreadsMutex);
981 
982 #ifndef NDEBUG
983         // Forbid malloc during the mark phase. Marking a thread suspends it, so
984         // a malloc inside markChildren() would risk a deadlock with a thread that had been
985         // suspended while holding the malloc lock.
986         fastMallocForbid();
987 #endif
988         // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
989         // and since this is a shared heap, they are real locks.
990         for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
991             if (!pthread_equal(thread->posixThread, pthread_self()))
992                 markOtherThreadConservatively(markStack, thread);
993         }
994 #ifndef NDEBUG
995         fastMallocAllow();
996 #endif
997     }
998 #endif
999 }
1000 
1001 void Heap::protect(JSValue k)
1002 {
1003     ASSERT(k);
1004     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
1005 
1006     if (!k.isCell())
1007         return;
1008 
1009     m_protectedValues.add(k.asCell());
1010 }
1011 
1012 void Heap::unprotect(JSValue k)
1013 {
1014     ASSERT(k);
1015     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
1016 
1017     if (!k.isCell())
1018         return;
1019 
1020     m_protectedValues.remove(k.asCell());
1021 }
1022 
1023 void Heap::markProtectedObjects(MarkStack& markStack)
1024 {
1025     ProtectCountSet::iterator end = m_protectedValues.end();
1026     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
1027         markStack.append(it->first);
1028         markStack.drain();
1029     }
1030 }
1031 
1032 void Heap::clearMarkBits()
1033 {
1034     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
1035         clearMarkBits(m_heap.blocks[i]);
1036 }
1037 
1038 void Heap::clearMarkBits(CollectorBlock* block)
1039 {
1040     // allocate assumes that the last cell in every block is marked.
1041     block->marked.clearAll();
1042     block->marked.set(HeapConstants::cellsPerBlock - 1);
1043 }
1044 
1045 size_t Heap::markedCells(size_t startBlock, size_t startCell) const
1046 {
1047     ASSERT(startBlock <= m_heap.usedBlocks);
1048     ASSERT(startCell < HeapConstants::cellsPerBlock);
1049 
1050     if (startBlock >= m_heap.usedBlocks)
1051         return 0;
1052 
1053     size_t result = 0;
1054     result += m_heap.blocks[startBlock]->marked.count(startCell);
1055     for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
1056         result += m_heap.blocks[i]->marked.count();
1057 
1058     return result;
1059 }
1060 
1061 void Heap::sweep()
1062 {
1063     ASSERT(m_heap.operationInProgress == NoOperation);
1064     if (m_heap.operationInProgress != NoOperation)
1065         CRASH();
1066     m_heap.operationInProgress = Collection;
1067 
1068 #if !ENABLE(JSC_ZOMBIES)
1069     Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
1070 #endif
1071 
1072     DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
1073     DeadObjectIterator end(m_heap, m_heap.usedBlocks);
1074     for ( ; it != end; ++it) {
1075         JSCell* cell = *it;
1076 #if ENABLE(JSC_ZOMBIES)
1077         if (!cell->isZombie()) {
1078             const ClassInfo* info = cell->classInfo();
1079             cell->~JSCell();
1080             new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
1081             Heap::markCell(cell);
1082         }
1083 #else
1084         cell->~JSCell();
1085         // Callers of sweep assume it's safe to mark any cell in the heap.
1086         new (cell) JSCell(dummyMarkableCellStructure);
1087 #endif
1088     }
1089 
1090     m_heap.operationInProgress = NoOperation;
1091 }
1092 
1093 void Heap::markRoots()
1094 {
1095 #ifndef NDEBUG
1096     if (m_globalData->isSharedInstance) {
1097         ASSERT(JSLock::lockCount() > 0);
1098         ASSERT(JSLock::currentThreadIsHoldingLock());
1099     }
1100 #endif
1101 
1102     ASSERT(m_heap.operationInProgress == NoOperation);
1103     if (m_heap.operationInProgress != NoOperation)
1104         CRASH();
1105 
1106     m_heap.operationInProgress = Collection;
1107 
1108     MarkStack& markStack = m_globalData->markStack;
1109 
1110     // Reset mark bits.
1111     clearMarkBits();
1112 
1113     // Mark stack roots.
1114     markStackObjectsConservatively(markStack);
1115     m_globalData->interpreter->registerFile().markCallFrames(markStack, this);
1116 
1117     // Mark explicitly registered roots.
1118     markProtectedObjects(markStack);
1119 
1120     // Mark misc. other roots.
1121     if (m_markListSet && m_markListSet->size())
1122         MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
1123     if (m_globalData->exception)
1124         markStack.append(m_globalData->exception);
1125     if (m_globalData->functionCodeBlockBeingReparsed)
1126         m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
1127     if (m_globalData->firstStringifierToMark)
1128         JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);
1129 
1130     // Mark the small strings cache last, since it will clear itself if nothing
1131     // else has marked it.
1132     m_globalData->smallStrings.markChildren(markStack);
1133 
1134     markStack.drain();
1135     markStack.compact();
1136 
1137     m_heap.operationInProgress = NoOperation;
1138 }
1139 
1140 size_t Heap::objectCount() const
1141 {
1142     return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
1143            + m_heap.nextCell // allocated cells in current block
1144            + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
1145            - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
1146 }
1147 
1148 void Heap::addToStatistics(Heap::Statistics& statistics) const
1149 {
1150     statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
1151     statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
1152 }
1153 
1154 Heap::Statistics Heap::statistics() const
1155 {
1156     Statistics statistics = { 0, 0 };
1157     addToStatistics(statistics);
1158     return statistics;
1159 }
1160 
1161 size_t Heap::globalObjectCount()
1162 {
1163     size_t count = 0;
1164     if (JSGlobalObject* head = m_globalData->head) {
1165         JSGlobalObject* o = head;
1166         do {
1167             ++count;
1168             o = o->next();
1169         } while (o != head);
1170     }
1171     return count;
1172 }
1173 
1174 size_t Heap::protectedGlobalObjectCount()
1175 {
1176     size_t count = 0;
1177     if (JSGlobalObject* head = m_globalData->head) {
1178         JSGlobalObject* o = head;
1179         do {
1180             if (m_protectedValues.contains(o))
1181                 ++count;
1182             o = o->next();
1183         } while (o != head);
1184     }
1185 
1186     return count;
1187 }
1188 
1189 size_t Heap::protectedObjectCount()
1190 {
1191     return m_protectedValues.size();
1192 }
1193 
1194 static const char* typeName(JSCell* cell)
1195 {
1196     if (cell->isString())
1197         return "string";
1198 #if USE(JSVALUE32)
1199     if (cell->isNumber())
1200         return "number";
1201 #endif
1202     if (cell->isGetterSetter())
1203         return "Getter-Setter";
1204     if (cell->isAPIValueWrapper())
1205         return "API wrapper";
1206     if (cell->isPropertyNameIterator())
1207         return "For-in iterator";
1208     if (!cell->isObject())
1209         return "[empty cell]";
1210     const ClassInfo* info = cell->classInfo();
1211     return info ? info->className : "Object";
1212 }
1213 
1214 HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
1215 {
1216     HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
1217 
1218     ProtectCountSet::iterator end = m_protectedValues.end();
1219     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
1220         counts->add(typeName(it->first));
1221 
1222     return counts;
1223 }
1224 
1225 HashCountedSet<const char*>* Heap::objectTypeCounts()
1226 {
1227     HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
1228 
1229     LiveObjectIterator it = primaryHeapBegin();
1230     LiveObjectIterator heapEnd = primaryHeapEnd();
1231     for ( ; it != heapEnd; ++it)
1232         counts->add(typeName(*it));
1233 
1234     return counts;
1235 }
1236 
1237 bool Heap::isBusy()
1238 {
1239     return m_heap.operationInProgress != NoOperation;
1240 }
1241 
1242 void Heap::reset()
1243 {
1244     JAVASCRIPTCORE_GC_BEGIN();
1245 
1246     markRoots();
1247 
1248     JAVASCRIPTCORE_GC_MARKED();
1249 
1250     m_heap.nextCell = 0;
1251     m_heap.nextBlock = 0;
1252     m_heap.nextNumber = 0;
1253     m_heap.extraCost = 0;
1254 #if ENABLE(JSC_ZOMBIES)
1255     sweep();
1256 #endif
1257     resizeBlocks();
1258 
1259     JAVASCRIPTCORE_GC_END();
1260 }
1261 
1262 void Heap::collectAllGarbage()
1263 {
1264     JAVASCRIPTCORE_GC_BEGIN();
1265 
1266     // If the last iteration through the heap deallocated blocks, we need
1267     // to clean up remaining garbage before marking. Otherwise, the conservative
1268     // marking mechanism might follow a pointer to unmapped memory.
1269     if (m_heap.didShrink)
1270         sweep();
1271 
1272     markRoots();
1273 
1274     JAVASCRIPTCORE_GC_MARKED();
1275 
1276     m_heap.nextCell = 0;
1277     m_heap.nextBlock = 0;
1278     m_heap.nextNumber = 0;
1279     m_heap.extraCost = 0;
1280     sweep();
1281     resizeBlocks();
1282 
1283     JAVASCRIPTCORE_GC_END();
1284 }
1285 
1286 LiveObjectIterator Heap::primaryHeapBegin()
1287 {
1288     return LiveObjectIterator(m_heap, 0);
1289 }
1290 
1291 LiveObjectIterator Heap::primaryHeapEnd()
1292 {
1293     return LiveObjectIterator(m_heap, m_heap.usedBlocks);
1294 }
1295 
1296 } // namespace JSC
1297