1 /*
2  *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20 
21 #include "config.h"
22 #include "Collector.h"
23 
24 #include "ArgList.h"
25 #include "CallFrame.h"
26 #include "CollectorHeapIterator.h"
27 #include "Interpreter.h"
28 #include "JSGlobalObject.h"
29 #include "JSLock.h"
30 #include "JSONObject.h"
31 #include "JSString.h"
32 #include "JSValue.h"
33 #include "MarkStack.h"
34 #include "Nodes.h"
35 #include "Tracing.h"
36 #include <algorithm>
37 #include <limits.h>
38 #include <setjmp.h>
39 #include <stdlib.h>
40 #include <wtf/FastMalloc.h>
41 #include <wtf/HashCountedSet.h>
42 #include <wtf/UnusedParam.h>
43 #include <wtf/VMTags.h>
44 
45 #if PLATFORM(DARWIN)
46 
47 #include <mach/mach_init.h>
48 #include <mach/mach_port.h>
49 #include <mach/task.h>
50 #include <mach/thread_act.h>
51 #include <mach/vm_map.h>
52 
53 #elif PLATFORM(SYMBIAN)
54 #include <e32std.h>
55 #include <e32cmn.h>
56 #include <unistd.h>
57 
58 #elif PLATFORM(WIN_OS)
59 
60 #include <windows.h>
61 
62 #elif PLATFORM(UNIX)
63 
64 #include <stdlib.h>
65 #include <sys/mman.h>
66 #include <unistd.h>
67 
68 #if PLATFORM(SOLARIS)
69 #include <thread.h>
70 #else
71 #include <pthread.h>
72 #endif
73 
74 #if HAVE(PTHREAD_NP_H)
75 #include <pthread_np.h>
76 #endif
77 
78 #endif
79 
80 #define DEBUG_COLLECTOR 0
81 #define COLLECT_ON_EVERY_ALLOCATION 0
82 
83 using std::max;
84 
85 namespace JSC {
86 
87 // tunable parameters
88 
89 const size_t SPARE_EMPTY_BLOCKS = 2;
90 const size_t GROWTH_FACTOR = 2;
91 const size_t LOW_WATER_FACTOR = 4;
92 const size_t ALLOCATIONS_PER_COLLECTION = 4000;
93 // This value has to be a macro to be used in max() without introducing
94 // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
95 #define MIN_ARRAY_SIZE (static_cast<size_t>(14))
96 
97 #if PLATFORM(SYMBIAN)
98 const size_t MAX_NUM_BLOCKS = 256; // Max size of collector heap set to 16 MB
99 static RHeap* userChunk = 0;
100 #endif
101 
102 static void freeHeap(CollectorHeap*);
103 
104 #if ENABLE(JSC_MULTIPLE_THREADS)
105 
106 #if PLATFORM(DARWIN)
107 typedef mach_port_t PlatformThread;
108 #elif PLATFORM(WIN_OS)
109 struct PlatformThread {
110     PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
111     DWORD id;
112     HANDLE handle;
113 };
114 #endif
115 
116 class Heap::Thread {
117 public:
118     Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
119         : posixThread(pthread)
120         , platformThread(platThread)
121         , stackBase(base)
122     {
123     }
124 
125     Thread* next;
126     pthread_t posixThread;
127     PlatformThread platformThread;
128     void* stackBase;
129 };
130 
131 #endif
132 
133 Heap::Heap(JSGlobalData* globalData)
134     : m_markListSet(0)
135 #if ENABLE(JSC_MULTIPLE_THREADS)
136     , m_registeredThreads(0)
137     , m_currentThreadRegistrar(0)
138 #endif
139     , m_globalData(globalData)
140 {
141     ASSERT(globalData);
142 
143 #if PLATFORM(SYMBIAN)
144     // Symbian OpenC supports mmap but currently not the MAP_ANON flag.
145     // Using fastMalloc() does not properly align blocks on 64k boundaries
146     // and the previous implementation was flawed/incomplete.
147     // UserHeap::ChunkHeap allows allocation of continuous memory and specification
148     // of alignment value for (symbian) cells within that heap.
149     //
150     // Clarification and mapping of terminology:
151     // RHeap (created by UserHeap::ChunkHeap below) is a continuous memory chunk,
152     // which can dynamically grow up to 8 MB
153     // and holds all CollectorBlocks of this session (static).
154     // Each symbian cell within RHeap maps to a 64kb aligned CollectorBlock.
155     // JSCell objects are maintained as usual within CollectorBlocks.
156     if (!userChunk) {
157         userChunk = UserHeap::ChunkHeap(0, 0, MAX_NUM_BLOCKS * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
158         if (!userChunk)
159             CRASH();
160     }
161 #endif // PLATFORM(SYMBIAN)
162 
163     memset(&primaryHeap, 0, sizeof(CollectorHeap));
164     memset(&numberHeap, 0, sizeof(CollectorHeap));
165 }
166 
167 Heap::~Heap()
168 {
169     // The destroy function must already have been called, so assert this.
170     ASSERT(!m_globalData);
171 }
172 
173 void Heap::destroy()
174 {
175     JSLock lock(SilenceAssertionsOnly);
176 
177     if (!m_globalData)
178         return;
179 
180     // The global object is not GC protected at this point, so sweeping may delete it
181     // (and thus the global data) before other objects that may use the global data.
182     RefPtr<JSGlobalData> protect(m_globalData);
183 
184     delete m_markListSet;
185     m_markListSet = 0;
186 
187     sweep<PrimaryHeap>();
188     // No need to sweep number heap, because the JSNumber destructor doesn't do anything.
189 
190     ASSERT(!primaryHeap.numLiveObjects);
191 
192     freeHeap(&primaryHeap);
193     freeHeap(&numberHeap);
194 
195 #if ENABLE(JSC_MULTIPLE_THREADS)
196     if (m_currentThreadRegistrar) {
197         int error = pthread_key_delete(m_currentThreadRegistrar);
198         ASSERT_UNUSED(error, !error);
199     }
200 
201     MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
202     for (Heap::Thread* t = m_registeredThreads; t;) {
203         Heap::Thread* next = t->next;
204         delete t;
205         t = next;
206     }
207 #endif
208 
209     m_globalData = 0;
210 }
211 
212 template <HeapType heapType>
213 static NEVER_INLINE CollectorBlock* allocateBlock()
214 {
215 #if PLATFORM(DARWIN)
216     vm_address_t address = 0;
217     // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
218     vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
219 #elif PLATFORM(SYMBIAN)
220     // Allocate a 64 kb aligned CollectorBlock
221     unsigned char* mask = reinterpret_cast<unsigned char*>(userChunk->Alloc(BLOCK_SIZE));
222     if (!mask)
223         CRASH();
224     uintptr_t address = reinterpret_cast<uintptr_t>(mask);
225 
226     memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
227 #elif PLATFORM(WIN_OS)
228     // Windows virtual address granularity is naturally 64k
229     LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
230 #elif HAVE(POSIX_MEMALIGN)
231     void* address;
232     posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
233     memset(address, 0, BLOCK_SIZE);
234 #else
235 
236 #if ENABLE(JSC_MULTIPLE_THREADS)
237 #error Need to initialize pagesize safely.
238 #endif
239     static size_t pagesize = getpagesize();
240 
241     size_t extra = 0;
242     if (BLOCK_SIZE > pagesize)
243         extra = BLOCK_SIZE - pagesize;
244 
245     void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
246     uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
247 
248     size_t adjust = 0;
249     if ((address & BLOCK_OFFSET_MASK) != 0)
250         adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
251 
252     if (adjust > 0)
253         munmap(reinterpret_cast<char*>(address), adjust);
254 
255     if (adjust < extra)
256         munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
257 
258     address += adjust;
259     memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
260 #endif
261     reinterpret_cast<CollectorBlock*>(address)->type = heapType;
262     return reinterpret_cast<CollectorBlock*>(address);
263 }
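// Worked example of the generic mmap path above (illustrative only; assumes the 64 KB blocks
// described in the Symbian/Windows comments and BLOCK_OFFSET_MASK == BLOCK_SIZE - 1):
//     BLOCK_SIZE = 0x10000, pagesize = 0x1000, so extra = 0xF000.
//     mmap returns 0x12345000; offset within a block = 0x12345000 & 0xFFFF = 0x5000,
//     so adjust = 0x10000 - 0x5000 = 0xB000.
//     munmap(0x12345000, 0xB000) trims the misaligned head,
//     munmap(0x12360000, 0x4000) trims the unused tail (extra - adjust),
//     and the surviving mapping [0x12350000, 0x12360000) is exactly one aligned block.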
264 
265 static void freeBlock(CollectorBlock* block)
266 {
267 #if PLATFORM(DARWIN)
268     vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
269 #elif PLATFORM(SYMBIAN)
270     userChunk->Free(reinterpret_cast<TAny*>(block));
271 #elif PLATFORM(WIN_OS)
272     VirtualFree(block, 0, MEM_RELEASE);
273 #elif HAVE(POSIX_MEMALIGN)
274     free(block);
275 #else
276     munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
277 #endif
278 }
279 
280 static void freeHeap(CollectorHeap* heap)
281 {
282     for (size_t i = 0; i < heap->usedBlocks; ++i)
283         if (heap->blocks[i])
284             freeBlock(heap->blocks[i]);
285     fastFree(heap->blocks);
286     memset(heap, 0, sizeof(CollectorHeap));
287 }
288 
289 void Heap::recordExtraCost(size_t cost)
290 {
291     // Our frequency of garbage collection tries to balance memory use against speed
292     // by collecting based on the number of newly created values. However, for values
293     // that hold on to a great deal of memory that's not in the form of other JS values,
294     // that is not good enough - in some cases a lot of those objects can pile up and
295     // use crazy amounts of memory without a GC happening. So we track these extra
296     // memory costs. Only unusually large objects are noted, and we only keep track
297     // of this extra cost until the next GC. In garbage collected languages, most values
298     // are either very short lived temporaries, or have extremely long lifetimes. So
299     // if a large value survives one garbage collection, there is not much point to
300     // collecting more frequently as long as it stays alive.
301     // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost
302 
303     primaryHeap.extraCost += cost;
304 }
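// Illustrative sketch of how a caller might report out-of-line memory. LargeBufferCell and its
// allocation helper are hypothetical, and reaching the heap through JSGlobalData::heap is an
// assumption about how this Heap is ordinarily owned; only recordExtraCost() and
// Heap::allocate() above are real:
//
//     void* LargeBufferCell::operator new(size_t size, JSGlobalData* globalData, size_t bufferBytes)
//     {
//         // Count the external buffer against the collection heuristic in heapAllocate().
//         globalData->heap.recordExtraCost(bufferBytes);
//         return globalData->heap.allocate(size);
//     }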
305 
306 template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
307 {
308     typedef typename HeapConstants<heapType>::Block Block;
309     typedef typename HeapConstants<heapType>::Cell Cell;
310 
311     CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
312     ASSERT(JSLock::lockCount() > 0);
313     ASSERT(JSLock::currentThreadIsHoldingLock());
314     ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);
315 
316     ASSERT(heap.operationInProgress == NoOperation);
317     ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
318     // FIXME: If another global variable access here doesn't hurt performance
319     // too much, we could CRASH() in NDEBUG builds, which could help ensure we
320     // don't spend any time debugging cases where we allocate inside an object's
321     // deallocation code.
322 
323 #if COLLECT_ON_EVERY_ALLOCATION
324     collect();
325 #endif
326 
327     size_t numLiveObjects = heap.numLiveObjects;
328     size_t usedBlocks = heap.usedBlocks;
329     size_t i = heap.firstBlockWithPossibleSpace;
330 
331     // if we have a huge amount of extra cost, we'll try to collect even if we still have
332     // free cells left.
333     if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
334         size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
335         size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
336         const size_t newCost = numNewObjects + heap.extraCost;
337         if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
338             goto collect;
339     }
340 
341     ASSERT(heap.operationInProgress == NoOperation);
342 #ifndef NDEBUG
343     // FIXME: Consider doing this in NDEBUG builds too (see comment above).
344     heap.operationInProgress = Allocation;
345 #endif
346 
347 scan:
348     Block* targetBlock;
349     size_t targetBlockUsedCells;
350     if (i != usedBlocks) {
351         targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
352         targetBlockUsedCells = targetBlock->usedCells;
353         ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
354         while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
355             if (++i == usedBlocks)
356                 goto collect;
357             targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
358             targetBlockUsedCells = targetBlock->usedCells;
359             ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
360         }
361         heap.firstBlockWithPossibleSpace = i;
362     } else {
363 
364 collect:
365         size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
366         size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
367         const size_t newCost = numNewObjects + heap.extraCost;
368 
369         if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
370 #ifndef NDEBUG
371             heap.operationInProgress = NoOperation;
372 #endif
373             bool collected = collect();
374 #ifndef NDEBUG
375             heap.operationInProgress = Allocation;
376 #endif
377             if (collected) {
378                 numLiveObjects = heap.numLiveObjects;
379                 usedBlocks = heap.usedBlocks;
380                 i = heap.firstBlockWithPossibleSpace;
381                 goto scan;
382             }
383         }
384 
385         // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
386         size_t numBlocks = heap.numBlocks;
387         if (usedBlocks == numBlocks) {
388             static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
389             if (numBlocks > maxNumBlocks)
390                 CRASH();
391             numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
392             heap.numBlocks = numBlocks;
393             heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
394         }
395 
396         targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
397         targetBlock->freeList = targetBlock->cells;
398         targetBlock->heap = this;
399         targetBlockUsedCells = 0;
400         heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
401         heap.usedBlocks = usedBlocks + 1;
402         heap.firstBlockWithPossibleSpace = usedBlocks;
403     }
404 
405     // find a free spot in the block and detach it from the free list
406     Cell* newCell = targetBlock->freeList;
407 
408     // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
409     targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;
410 
411     targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
412     heap.numLiveObjects = numLiveObjects + 1;
413 
414 #ifndef NDEBUG
415     // FIXME: Consider doing this in NDEBUG builds too (see comment above).
416     heap.operationInProgress = NoOperation;
417 #endif
418 
419     return newCell;
420 }
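// Note on the free-list encoding used by heapAllocate() and sweep(): freeList points at the
// first free cell, and each free cell's u.freeCell.next holds the distance from the cell that
// follows it (cell + 1) to the next free cell, so a freshly zeroed block already encodes a
// valid list in which every cell is followed by its neighbour. For example, if cells 3 and 7
// of a block are free and freeList points at cell 3, cell 3 stores next = 7 - (3 + 1) = 3, and
// the allocation above advances freeList to (cell 3 + 1) + 3 = cell 7.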
421 
422 void* Heap::allocate(size_t s)
423 {
424     return heapAllocate<PrimaryHeap>(s);
425 }
426 
427 void* Heap::allocateNumber(size_t s)
428 {
429     return heapAllocate<NumberHeap>(s);
430 }
431 
432 #if PLATFORM(WINCE)
433 void* g_stackBase = 0;
434 
435 inline bool isPageWritable(void* page)
436 {
437     MEMORY_BASIC_INFORMATION memoryInformation;
438     DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));
439 
440     // return false on error, including ptr outside memory
441     if (result != sizeof(memoryInformation))
442         return false;
443 
444     DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
445     return protect == PAGE_READWRITE
446         || protect == PAGE_WRITECOPY
447         || protect == PAGE_EXECUTE_READWRITE
448         || protect == PAGE_EXECUTE_WRITECOPY;
449 }
450 
451 static void* getStackBase(void* previousFrame)
452 {
453     // find the address of this stack frame by taking the address of a local variable
454     bool isGrowingDownward;
455     void* thisFrame = (void*)(&isGrowingDownward);
456 
457     isGrowingDownward = previousFrame < &thisFrame;
458     static DWORD pageSize = 0;
459     if (!pageSize) {
460         SYSTEM_INFO systemInfo;
461         GetSystemInfo(&systemInfo);
462         pageSize = systemInfo.dwPageSize;
463     }
464 
465     // scan all of memory starting from this frame, and return the last writeable page found
466     register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
467     if (isGrowingDownward) {
468         while (currentPage > 0) {
469             // check for underflow
470             if (currentPage >= (char*)pageSize)
471                 currentPage -= pageSize;
472             else
473                 currentPage = 0;
474             if (!isPageWritable(currentPage))
475                 return currentPage + pageSize;
476         }
477         return 0;
478     } else {
479         while (true) {
480             // guaranteed to complete because isPageWritable returns false at end of memory
481             currentPage += pageSize;
482             if (!isPageWritable(currentPage))
483                 return currentPage;
484         }
485     }
486 }
487 #endif
488 
489 static inline void* currentThreadStackBase()
490 {
491 #if PLATFORM(DARWIN)
492     pthread_t thread = pthread_self();
493     return pthread_get_stackaddr_np(thread);
494 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
495     // offset 0x18 from the FS segment register gives a pointer to
496     // the thread information block for the current thread
497     NT_TIB* pTib;
498     __asm {
499         MOV EAX, FS:[18h]
500         MOV pTib, EAX
501     }
502     return static_cast<void*>(pTib->StackBase);
503 #elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
504     PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
505     return reinterpret_cast<void*>(pTib->StackBase);
506 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
507     // offset 0x18 from the FS segment register gives a pointer to
508     // the thread information block for the current thread
509     NT_TIB* pTib;
510     asm ( "movl %%fs:0x18, %0\n"
511           : "=r" (pTib)
512         );
513     return static_cast<void*>(pTib->StackBase);
514 #elif PLATFORM(SOLARIS)
515     stack_t s;
516     thr_stksegment(&s);
517     return s.ss_sp;
518 #elif PLATFORM(OPENBSD)
519     pthread_t thread = pthread_self();
520     stack_t stack;
521     pthread_stackseg_np(thread, &stack);
522     return stack.ss_sp;
523 #elif PLATFORM(SYMBIAN)
524     static void* stackBase = 0;
525     if (stackBase == 0) {
526         TThreadStackInfo info;
527         RThread thread;
528         thread.StackInfo(info);
529         stackBase = (void*)info.iBase;
530     }
531     return (void*)stackBase;
532 #elif PLATFORM(UNIX)
533     static void* stackBase = 0;
534     static size_t stackSize = 0;
535     static pthread_t stackThread;
536     pthread_t thread = pthread_self();
537     if (stackBase == 0 || thread != stackThread) {
538         pthread_attr_t sattr;
539         pthread_attr_init(&sattr);
540 #if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
541         // e.g. on FreeBSD 5.4, neundorf@kde.org
542         pthread_attr_get_np(thread, &sattr);
543 #else
544         // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
545         pthread_getattr_np(thread, &sattr);
546 #endif
547         int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
548         (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
549         ASSERT(stackBase);
550         pthread_attr_destroy(&sattr);
551         stackThread = thread;
552     }
553     return static_cast<char*>(stackBase) + stackSize;
554 #elif PLATFORM(WINCE)
555     if (g_stackBase)
556         return g_stackBase;
557     else {
558         int dummy;
559         return getStackBase(&dummy);
560     }
561 #else
562 #error Need a way to get the stack base on this platform
563 #endif
564 }
565 
566 #if ENABLE(JSC_MULTIPLE_THREADS)
567 
568 static inline PlatformThread getCurrentPlatformThread()
569 {
570 #if PLATFORM(DARWIN)
571     return pthread_mach_thread_np(pthread_self());
572 #elif PLATFORM(WIN_OS)
573     HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
574     return PlatformThread(GetCurrentThreadId(), threadHandle);
575 #endif
576 }
577 
578 void Heap::makeUsableFromMultipleThreads()
579 {
580     if (m_currentThreadRegistrar)
581         return;
582 
583     int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
584     if (error)
585         CRASH();
586 }
587 
588 void Heap::registerThread()
589 {
590     if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
591         return;
592 
593     pthread_setspecific(m_currentThreadRegistrar, this);
594     Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());
595 
596     MutexLocker lock(m_registeredThreadsMutex);
597 
598     thread->next = m_registeredThreads;
599     m_registeredThreads = thread;
600 }
601 
602 void Heap::unregisterThread(void* p)
603 {
604     if (p)
605         static_cast<Heap*>(p)->unregisterThread();
606 }
607 
608 void Heap::unregisterThread()
609 {
610     pthread_t currentPosixThread = pthread_self();
611 
612     MutexLocker lock(m_registeredThreadsMutex);
613 
614     if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
615         Thread* t = m_registeredThreads;
616         m_registeredThreads = m_registeredThreads->next;
617         delete t;
618     } else {
619         Heap::Thread* last = m_registeredThreads;
620         Heap::Thread* t;
621         for (t = m_registeredThreads->next; t; t = t->next) {
622             if (pthread_equal(t->posixThread, currentPosixThread)) {
623                 last->next = t->next;
624                 break;
625             }
626             last = t;
627         }
628         ASSERT(t); // If t is NULL, we never found ourselves in the list.
629         delete t;
630     }
631 }
632 
633 #else // ENABLE(JSC_MULTIPLE_THREADS)
634 
635 void Heap::registerThread()
636 {
637 }
638 
639 #endif
640 
641 #define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)
642 
643 // cell size needs to be a power of two for this to be valid
644 #define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
645 
646 void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
647 {
648     if (start > end) {
649         void* tmp = start;
650         start = end;
651         end = tmp;
652     }
653 
654     ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
655     ASSERT(IS_POINTER_ALIGNED(start));
656     ASSERT(IS_POINTER_ALIGNED(end));
657 
658     char** p = static_cast<char**>(start);
659     char** e = static_cast<char**>(end);
660 
661     size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
662     size_t usedNumberBlocks = numberHeap.usedBlocks;
663     CollectorBlock** primaryBlocks = primaryHeap.blocks;
664     CollectorBlock** numberBlocks = numberHeap.blocks;
665 
666     const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
667 
668     while (p != e) {
669         char* x = *p++;
670         if (IS_HALF_CELL_ALIGNED(x) && x) {
671             uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
672             xAsBits &= CELL_ALIGN_MASK;
673             uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
674             CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
675             // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
676             for (size_t block = 0; block < usedNumberBlocks; block++) {
677                 if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
678                     Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
679                     goto endMarkLoop;
680                 }
681             }
682 
683             // Mark the primary heap
684             for (size_t block = 0; block < usedPrimaryBlocks; block++) {
685                 if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
686                     if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
687                         markStack.append(reinterpret_cast<JSCell*>(xAsBits));
688                         markStack.drain();
689                     }
690                     break;
691                 }
692             }
693         endMarkLoop:
694             ;
695         }
696     }
697 }
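// Note on the candidate test above: a scanned word is treated as a possible cell pointer only
// if it is at least half-cell aligned. It is then rounded down to a cell boundary
// (CELL_ALIGN_MASK), split into a block base (xAsBits - offset) and an offset within the block
// (BLOCK_OFFSET_MASK), and marked only if that base matches a block actually owned by one of
// the two heaps and the offset does not point past the last cell. Arbitrary integers that
// merely resemble pointers are therefore ignored, at the cost of occasionally keeping a dead
// object alive (conservative, never unsafe).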
698 
699 void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
700 {
701     void* dummy;
702     void* stackPointer = &dummy;
703     void* stackBase = currentThreadStackBase();
704     markConservatively(markStack, stackPointer, stackBase);
705 }
706 
707 void Heap::markCurrentThreadConservatively(MarkStack& markStack)
708 {
709     // setjmp forces volatile registers onto the stack
710     jmp_buf registers;
711 #if COMPILER(MSVC)
712 #pragma warning(push)
713 #pragma warning(disable: 4611)
714 #endif
715     setjmp(registers);
716 #if COMPILER(MSVC)
717 #pragma warning(pop)
718 #endif
719 
720     markCurrentThreadConservativelyInternal(markStack);
721 }
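// The setjmp() above is used only for its side effect: it spills callee-saved registers into
// the jmp_buf on this stack frame, so pointers held only in registers become visible to the
// conservative stack scan. The same idiom in isolation (sketch; scanRange is a hypothetical
// helper, not part of this file):
//
//     jmp_buf registers;
//     setjmp(registers);                        // registers now live in stack memory
//     void* top = &registers;                   // a stack address at or below all of them
//     scanRange(top, currentThreadStackBase()); // scan [top, stack base) for cell pointers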
722 
723 #if ENABLE(JSC_MULTIPLE_THREADS)
724 
725 static inline void suspendThread(const PlatformThread& platformThread)
726 {
727 #if PLATFORM(DARWIN)
728     thread_suspend(platformThread);
729 #elif PLATFORM(WIN_OS)
730     SuspendThread(platformThread.handle);
731 #else
732 #error Need a way to suspend threads on this platform
733 #endif
734 }
735 
736 static inline void resumeThread(const PlatformThread& platformThread)
737 {
738 #if PLATFORM(DARWIN)
739     thread_resume(platformThread);
740 #elif PLATFORM(WIN_OS)
741     ResumeThread(platformThread.handle);
742 #else
743 #error Need a way to resume threads on this platform
744 #endif
745 }
746 
747 typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
748 
749 #if PLATFORM(DARWIN)
750 
751 #if PLATFORM(X86)
752 typedef i386_thread_state_t PlatformThreadRegisters;
753 #elif PLATFORM(X86_64)
754 typedef x86_thread_state64_t PlatformThreadRegisters;
755 #elif PLATFORM(PPC)
756 typedef ppc_thread_state_t PlatformThreadRegisters;
757 #elif PLATFORM(PPC64)
758 typedef ppc_thread_state64_t PlatformThreadRegisters;
759 #elif PLATFORM(ARM)
760 typedef arm_thread_state_t PlatformThreadRegisters;
761 #else
762 #error Unknown Architecture
763 #endif
764 
765 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
766 typedef CONTEXT PlatformThreadRegisters;
767 #else
768 #error Need a thread register struct for this platform
769 #endif
770 
771 static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
772 {
773 #if PLATFORM(DARWIN)
774 
775 #if PLATFORM(X86)
776     unsigned user_count = sizeof(regs)/sizeof(int);
777     thread_state_flavor_t flavor = i386_THREAD_STATE;
778 #elif PLATFORM(X86_64)
779     unsigned user_count = x86_THREAD_STATE64_COUNT;
780     thread_state_flavor_t flavor = x86_THREAD_STATE64;
781 #elif PLATFORM(PPC)
782     unsigned user_count = PPC_THREAD_STATE_COUNT;
783     thread_state_flavor_t flavor = PPC_THREAD_STATE;
784 #elif PLATFORM(PPC64)
785     unsigned user_count = PPC_THREAD_STATE64_COUNT;
786     thread_state_flavor_t flavor = PPC_THREAD_STATE64;
787 #elif PLATFORM(ARM)
788     unsigned user_count = ARM_THREAD_STATE_COUNT;
789     thread_state_flavor_t flavor = ARM_THREAD_STATE;
790 #else
791 #error Unknown Architecture
792 #endif
793 
794     kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
795     if (result != KERN_SUCCESS) {
796         WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
797                             "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
798         CRASH();
799     }
800     return user_count * sizeof(usword_t);
801 // end PLATFORM(DARWIN)
802 
803 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
804     regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
805     GetThreadContext(platformThread.handle, &regs);
806     return sizeof(CONTEXT);
807 #else
808 #error Need a way to get thread registers on this platform
809 #endif
810 }
811 
812 static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
813 {
814 #if PLATFORM(DARWIN)
815 
816 #if __DARWIN_UNIX03
817 
818 #if PLATFORM(X86)
819     return reinterpret_cast<void*>(regs.__esp);
820 #elif PLATFORM(X86_64)
821     return reinterpret_cast<void*>(regs.__rsp);
822 #elif PLATFORM(PPC) || PLATFORM(PPC64)
823     return reinterpret_cast<void*>(regs.__r1);
824 #elif PLATFORM(ARM)
825     return reinterpret_cast<void*>(regs.__sp);
826 #else
827 #error Unknown Architecture
828 #endif
829 
830 #else // !__DARWIN_UNIX03
831 
832 #if PLATFORM(X86)
833     return reinterpret_cast<void*>(regs.esp);
834 #elif PLATFORM(X86_64)
835     return reinterpret_cast<void*>(regs.rsp);
836 #elif (PLATFORM(PPC) || PLATFORM(PPC64))
837     return reinterpret_cast<void*>(regs.r1);
838 #else
839 #error Unknown Architecture
840 #endif
841 
842 #endif // __DARWIN_UNIX03
843 
844 // end PLATFORM(DARWIN)
845 #elif PLATFORM(X86) && PLATFORM(WIN_OS)
846     return reinterpret_cast<void*>((uintptr_t) regs.Esp);
847 #else
848 #error Need a way to get the stack pointer for another thread on this platform
849 #endif
850 }
851 
852 void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
853 {
854     suspendThread(thread->platformThread);
855 
856     PlatformThreadRegisters regs;
857     size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
858 
859     // mark the thread's registers
860     markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
861 
862     void* stackPointer = otherThreadStackPointer(regs);
863     markConservatively(markStack, stackPointer, thread->stackBase);
864 
865     resumeThread(thread->platformThread);
866 }
867 
868 #endif
869 
870 void Heap::markStackObjectsConservatively(MarkStack& markStack)
871 {
872     markCurrentThreadConservatively(markStack);
873 
874 #if ENABLE(JSC_MULTIPLE_THREADS)
875 
876     if (m_currentThreadRegistrar) {
877 
878         MutexLocker lock(m_registeredThreadsMutex);
879 
880 #ifndef NDEBUG
881         // Forbid malloc during the mark phase. Marking a thread suspends it, so
882         // a malloc inside markChildren() would risk a deadlock with a thread that had been
883         // suspended while holding the malloc lock.
884         fastMallocForbid();
885 #endif
886         // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
887         // and since this is a shared heap, they are real locks.
888         for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
889             if (!pthread_equal(thread->posixThread, pthread_self()))
890                 markOtherThreadConservatively(markStack, thread);
891         }
892 #ifndef NDEBUG
893         fastMallocAllow();
894 #endif
895     }
896 #endif
897 }
898 
899 void Heap::setGCProtectNeedsLocking()
900 {
901     // Most clients do not need to call this, with the notable exception of WebCore.
902     // Clients that use shared heap have JSLock protection, while others are supposed
903     // to do explicit locking. WebCore violates this contract in Database code,
904     // which calls gcUnprotect from a secondary thread.
905     if (!m_protectedValuesMutex)
906         m_protectedValuesMutex.set(new Mutex);
907 }
908 
909 void Heap::protect(JSValue k)
910 {
911     ASSERT(k);
912     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
913 
914     if (!k.isCell())
915         return;
916 
917     if (m_protectedValuesMutex)
918         m_protectedValuesMutex->lock();
919 
920     m_protectedValues.add(k.asCell());
921 
922     if (m_protectedValuesMutex)
923         m_protectedValuesMutex->unlock();
924 }
925 
926 void Heap::unprotect(JSValue k)
927 {
928     ASSERT(k);
929     ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
930 
931     if (!k.isCell())
932         return;
933 
934     if (m_protectedValuesMutex)
935         m_protectedValuesMutex->lock();
936 
937     m_protectedValues.remove(k.asCell());
938 
939     if (m_protectedValuesMutex)
940         m_protectedValuesMutex->unlock();
941 }
942 
943 Heap* Heap::heap(JSValue v)
944 {
945     if (!v.isCell())
946         return 0;
947     return Heap::cellBlock(v.asCell())->heap;
948 }
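// Heap::cellBlock() (declared in Collector.h) effectively masks the low bits off the cell
// pointer to recover the aligned CollectorBlock containing it; since heapAllocate() stores a
// back-pointer in each block ("targetBlock->heap = this"), mapping any cell JSValue to its
// owning Heap is a constant-time operation.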
949 
950 void Heap::markProtectedObjects(MarkStack& markStack)
951 {
952     if (m_protectedValuesMutex)
953         m_protectedValuesMutex->lock();
954 
955     ProtectCountSet::iterator end = m_protectedValues.end();
956     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
957         JSCell* val = it->first;
958         if (!val->marked()) {
959             markStack.append(val);
960             markStack.drain();
961         }
962     }
963 
964     if (m_protectedValuesMutex)
965         m_protectedValuesMutex->unlock();
966 }
967 
968 template <HeapType heapType> size_t Heap::sweep()
969 {
970     typedef typename HeapConstants<heapType>::Block Block;
971     typedef typename HeapConstants<heapType>::Cell Cell;
972 
973     // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
974     CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
975 
976     size_t emptyBlocks = 0;
977     size_t numLiveObjects = heap.numLiveObjects;
978 
979     for (size_t block = 0; block < heap.usedBlocks; block++) {
980         Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);
981 
982         size_t usedCells = curBlock->usedCells;
983         Cell* freeList = curBlock->freeList;
984 
985         if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
986             // special case with a block where all cells are used -- testing indicates this happens often
987             for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
988                 if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
989                     Cell* cell = curBlock->cells + i;
990 
991                     if (heapType != NumberHeap) {
992                         JSCell* imp = reinterpret_cast<JSCell*>(cell);
993                         // special case for allocated but uninitialized object
994                         // (We don't need this check earlier because nothing prior to this point
995                         // assumes the object has a valid vptr.)
996                         if (cell->u.freeCell.zeroIfFree == 0)
997                             continue;
998 
999                         imp->~JSCell();
1000                     }
1001 
1002                     --usedCells;
1003                     --numLiveObjects;
1004 
1005                     // put cell on the free list
1006                     cell->u.freeCell.zeroIfFree = 0;
1007                     cell->u.freeCell.next = freeList - (cell + 1);
1008                     freeList = cell;
1009                 }
1010             }
1011         } else {
1012             size_t minimumCellsToProcess = usedCells;
1013             for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
1014                 Cell* cell = curBlock->cells + i;
1015                 if (cell->u.freeCell.zeroIfFree == 0) {
1016                     ++minimumCellsToProcess;
1017                 } else {
1018                     if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
1019                         if (heapType != NumberHeap) {
1020                             JSCell* imp = reinterpret_cast<JSCell*>(cell);
1021                             imp->~JSCell();
1022                         }
1023                         --usedCells;
1024                         --numLiveObjects;
1025 
1026                         // put cell on the free list
1027                         cell->u.freeCell.zeroIfFree = 0;
1028                         cell->u.freeCell.next = freeList - (cell + 1);
1029                         freeList = cell;
1030                     }
1031                 }
1032             }
1033         }
1034 
1035         curBlock->usedCells = static_cast<uint32_t>(usedCells);
1036         curBlock->freeList = freeList;
1037         curBlock->marked.clearAll();
1038 
1039         if (usedCells == 0) {
1040             emptyBlocks++;
1041             if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
1042 #if !DEBUG_COLLECTOR
1043                 freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
1044 #endif
1045                 // swap with the last block so we compact as we go
1046                 heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
1047                 heap.usedBlocks--;
1048                 block--; // Don't move forward a step in this case
1049 
1050                 if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
1051                     heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
1052                     heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
1053                 }
1054             }
1055         }
1056     }
1057 
1058     if (heap.numLiveObjects != numLiveObjects)
1059         heap.firstBlockWithPossibleSpace = 0;
1060 
1061     heap.numLiveObjects = numLiveObjects;
1062     heap.numLiveObjectsAtLastCollect = numLiveObjects;
1063     heap.extraCost = 0;
1064     return numLiveObjects;
1065 }
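// Note on the block recycling above: blocks left completely empty by a sweep are released only
// once more than SPARE_EMPTY_BLOCKS of them have been seen, and the blocks array itself is
// shrunk (by GROWTH_FACTOR) only when fewer than 1/LOW_WATER_FACTOR of its slots are in use,
// so a heap that oscillates around the same size does not repeatedly free and reallocate
// blocks.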
1066 
1067 bool Heap::collect()
1068 {
1069 #ifndef NDEBUG
1070     if (m_globalData->isSharedInstance) {
1071         ASSERT(JSLock::lockCount() > 0);
1072         ASSERT(JSLock::currentThreadIsHoldingLock());
1073     }
1074 #endif
1075 
1076     ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
1077     if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
1078         CRASH();
1079 
1080     JAVASCRIPTCORE_GC_BEGIN();
1081     primaryHeap.operationInProgress = Collection;
1082     numberHeap.operationInProgress = Collection;
1083 
1084     // MARK: first mark all referenced objects recursively starting out from the set of root objects
1085     MarkStack& markStack = m_globalData->markStack;
1086     markStackObjectsConservatively(markStack);
1087     markProtectedObjects(markStack);
1088     if (m_markListSet && m_markListSet->size())
1089         MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
1090     if (m_globalData->exception && !m_globalData->exception.marked())
1091         markStack.append(m_globalData->exception);
1092     m_globalData->interpreter->registerFile().markCallFrames(markStack, this);
1093     m_globalData->smallStrings.mark();
1094     if (m_globalData->scopeNodeBeingReparsed)
1095         m_globalData->scopeNodeBeingReparsed->markAggregate(markStack);
1096     if (m_globalData->firstStringifierToMark)
1097         JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);
1098 
1099     markStack.drain();
1100     markStack.compact();
1101     JAVASCRIPTCORE_GC_MARKED();
1102 
1103     size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
1104     size_t numLiveObjects = sweep<PrimaryHeap>();
1105     numLiveObjects += sweep<NumberHeap>();
1106 
1107     primaryHeap.operationInProgress = NoOperation;
1108     numberHeap.operationInProgress = NoOperation;
1109     JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);
1110 
1111     return numLiveObjects < originalLiveObjects;
1112 }
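// Sketch of an embedder-side trigger (assumes the Heap is reached through the usual
// JSGlobalData::heap member, which is not referenced in this file; the JSLock constructor used
// here is the one that appears in Heap::destroy() above):
//
//     JSLock lock(SilenceAssertionsOnly);
//     bool reclaimedSomething = globalData->heap.collect();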
1113 
1114 size_t Heap::objectCount()
1115 {
1116     return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
1117 }
1118 
1119 template <HeapType heapType>
1120 static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
1121 {
1122     typedef HeapConstants<heapType> HC;
1123     for (size_t i = 0; i < heap.usedBlocks; ++i) {
1124         if (heap.blocks[i]) {
1125             statistics.size += BLOCK_SIZE;
1126             statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
1127         }
1128     }
1129 }
1130 
1131 Heap::Statistics Heap::statistics() const
1132 {
1133     Statistics statistics = { 0, 0 };
1134     JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
1135     JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
1136     return statistics;
1137 }
1138 
1139 size_t Heap::globalObjectCount()
1140 {
1141     size_t count = 0;
1142     if (JSGlobalObject* head = m_globalData->head) {
1143         JSGlobalObject* o = head;
1144         do {
1145             ++count;
1146             o = o->next();
1147         } while (o != head);
1148     }
1149     return count;
1150 }
1151 
1152 size_t Heap::protectedGlobalObjectCount()
1153 {
1154     if (m_protectedValuesMutex)
1155         m_protectedValuesMutex->lock();
1156 
1157     size_t count = 0;
1158     if (JSGlobalObject* head = m_globalData->head) {
1159         JSGlobalObject* o = head;
1160         do {
1161             if (m_protectedValues.contains(o))
1162                 ++count;
1163             o = o->next();
1164         } while (o != head);
1165     }
1166 
1167     if (m_protectedValuesMutex)
1168         m_protectedValuesMutex->unlock();
1169 
1170     return count;
1171 }
1172 
1173 size_t Heap::protectedObjectCount()
1174 {
1175     if (m_protectedValuesMutex)
1176         m_protectedValuesMutex->lock();
1177 
1178     size_t result = m_protectedValues.size();
1179 
1180     if (m_protectedValuesMutex)
1181         m_protectedValuesMutex->unlock();
1182 
1183     return result;
1184 }
1185 
1186 static const char* typeName(JSCell* cell)
1187 {
1188     if (cell->isString())
1189         return "string";
1190 #if USE(JSVALUE32)
1191     if (cell->isNumber())
1192         return "number";
1193 #endif
1194     if (cell->isGetterSetter())
1195         return "gettersetter";
1196     ASSERT(cell->isObject());
1197     const ClassInfo* info = static_cast<JSObject*>(cell)->classInfo();
1198     return info ? info->className : "Object";
1199 }
1200 
1201 HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
1202 {
1203     HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
1204 
1205     if (m_protectedValuesMutex)
1206         m_protectedValuesMutex->lock();
1207 
1208     ProtectCountSet::iterator end = m_protectedValues.end();
1209     for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
1210         counts->add(typeName(it->first));
1211 
1212     if (m_protectedValuesMutex)
1213         m_protectedValuesMutex->unlock();
1214 
1215     return counts;
1216 }
1217 
1218 bool Heap::isBusy()
1219 {
1220     return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
1221 }
1222 
1223 Heap::iterator Heap::primaryHeapBegin()
1224 {
1225     return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
1226 }
1227 
1228 Heap::iterator Heap::primaryHeapEnd()
1229 {
1230     return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
1231 }
1232 
1233 } // namespace JSC
1234