1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "config.h"
32 #include "platform/heap/Heap.h"
33
34 #include "platform/TraceEvent.h"
35 #include "platform/heap/ThreadState.h"
36 #include "public/platform/Platform.h"
37 #include "wtf/Assertions.h"
38 #include "wtf/LeakAnnotations.h"
39 #include "wtf/PassOwnPtr.h"
40 #if ENABLE(GC_TRACING)
41 #include "wtf/HashMap.h"
42 #include "wtf/HashSet.h"
43 #include "wtf/text/StringBuilder.h"
44 #include "wtf/text/StringHash.h"
45 #include <stdio.h>
46 #include <utility>
47 #endif
48
49 #if OS(POSIX)
50 #include <sys/mman.h>
51 #include <unistd.h>
52 #elif OS(WIN)
53 #include <windows.h>
54 #endif
55
56 namespace WebCore {
57
58 #if ENABLE(GC_TRACING)
static String classOf(const void* object)
60 {
61 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
62 if (gcInfo)
63 return gcInfo->m_className;
64
65 return "unknown";
66 }
67 #endif
68
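// Returns true if the object's first word (its primary vtable pointer) has
// been initialized. Objects still under construction can have a zero vtable
// and must be traced conservatively.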
static bool vTableInitialized(void* objectPointer)
70 {
71 return !!(*reinterpret_cast<Address*>(objectPointer));
72 }
73
74 #if OS(WIN)
static bool IsPowerOf2(size_t power)
76 {
77 return !((power - 1) & power);
78 }
79 #endif
80
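// Rounds an address up to the next Blink page boundary; an already aligned
// address is returned unchanged.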
static Address roundToBlinkPageBoundary(void* base)
82 {
83 return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
84 }
85
static size_t roundToOsPageSize(size_t size)
87 {
88 return (size + osPageSize() - 1) & ~(osPageSize() - 1);
89 }
90
size_t osPageSize()
92 {
93 #if OS(POSIX)
94 static const size_t pageSize = getpagesize();
95 #else
96 static size_t pageSize = 0;
97 if (!pageSize) {
98 SYSTEM_INFO info;
99 GetSystemInfo(&info);
100 pageSize = info.dwPageSize;
101 ASSERT(IsPowerOf2(pageSize));
102 }
103 #endif
104 return pageSize;
105 }
106
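// A MemoryRegion describes a contiguous range of virtual memory and knows how
// to commit, decommit and release it (mmap/mprotect/madvise on POSIX,
// VirtualAlloc/VirtualFree on Windows).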
107 class MemoryRegion {
108 public:
MemoryRegion(Address base, size_t size)
110 : m_base(base)
111 , m_size(size)
112 {
113 ASSERT(size > 0);
114 }
115
bool contains(Address addr) const
117 {
118 return m_base <= addr && addr < (m_base + m_size);
119 }
120
121
bool contains(const MemoryRegion& other) const
123 {
124 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
125 }
126
void release()
128 {
129 #if OS(POSIX)
130 int err = munmap(m_base, m_size);
131 RELEASE_ASSERT(!err);
132 #else
133 bool success = VirtualFree(m_base, 0, MEM_RELEASE);
134 RELEASE_ASSERT(success);
135 #endif
136 }
137
WARN_UNUSED_RETURN bool commit()
139 {
140 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
141 #if OS(POSIX)
142 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
143 if (!err) {
144 madvise(m_base, m_size, MADV_NORMAL);
145 return true;
146 }
147 return false;
148 #else
149 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
150 return !!result;
151 #endif
152 }
153
void decommit()
155 {
156 #if OS(POSIX)
157 int err = mprotect(m_base, m_size, PROT_NONE);
158 RELEASE_ASSERT(!err);
159 // FIXME: Consider using MADV_FREE on MacOS.
160 madvise(m_base, m_size, MADV_DONTNEED);
161 #else
162 bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
163 RELEASE_ASSERT(success);
164 #endif
165 }
166
Address base() const { return m_base; }
size_t size() const { return m_size; }
169
170 private:
171 Address m_base;
172 size_t m_size;
173 };
174
175 // Representation of the memory used for a Blink heap page.
176 //
177 // The representation keeps track of two memory regions:
178 //
// 1. The virtual memory reserved from the system in order to be able
180 // to free all the virtual memory reserved on destruction.
181 //
182 // 2. The writable memory (a sub-region of the reserved virtual
183 // memory region) that is used for the actual heap page payload.
184 //
185 // Guard pages are created before and after the writable memory.
186 class PageMemory {
187 public:
~PageMemory()
189 {
190 __lsan_unregister_root_region(m_writable.base(), m_writable.size());
191 m_reserved.release();
192 }
193
bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
void decommit() { m_writable.decommit(); }
196
Address writableStart() { return m_writable.base(); }
198
199 // Allocate a virtual address space for the blink page with the
200 // following layout:
201 //
202 // [ guard os page | ... payload ... | guard os page ]
203 // ^---{ aligned to blink page size }
204 //
static PageMemory* allocate(size_t payloadSize)
206 {
207 ASSERT(payloadSize > 0);
208
209 // Virtual memory allocation routines operate in OS page sizes.
// Round up the requested size to the nearest OS page size.
211 payloadSize = roundToOsPageSize(payloadSize);
212
213 // Overallocate by blinkPageSize and 2 times OS page size to
214 // ensure a chunk of memory which is blinkPageSize aligned and
215 // has a system page before and after to use for guarding. We
216 // unmap the excess memory before returning.
217 size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;
218
219 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
220 #if OS(POSIX)
221 Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
222 RELEASE_ASSERT(base != MAP_FAILED);
223
224 Address end = base + allocationSize;
225 Address alignedBase = roundToBlinkPageBoundary(base);
226 Address payloadBase = alignedBase + osPageSize();
227 Address payloadEnd = payloadBase + payloadSize;
228 Address blinkPageEnd = payloadEnd + osPageSize();
229
// If the allocated memory was not blink page aligned, release
231 // the memory before the aligned address.
232 if (alignedBase != base)
233 MemoryRegion(base, alignedBase - base).release();
234
// Create guard pages by decommitting an OS page before and
236 // after the payload.
237 MemoryRegion(alignedBase, osPageSize()).decommit();
238 MemoryRegion(payloadEnd, osPageSize()).decommit();
239
240 // Free the additional memory at the end of the page if any.
241 if (blinkPageEnd < end)
242 MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();
243
244 return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
245 #else
246 Address base = 0;
247 Address alignedBase = 0;
248
// On Windows it is impossible to partially release a region
// of memory allocated by VirtualAlloc. To avoid wasting
// virtual address space we reserve a large region, release it
// as a whole, and then try to reserve a blink-page-aligned
// region inside the address range it just occupied.
254 for (int attempt = 0; attempt < 3; attempt++) {
255 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
256 RELEASE_ASSERT(base);
257 VirtualFree(base, 0, MEM_RELEASE);
258
259 alignedBase = roundToBlinkPageBoundary(base);
260 base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
261 if (base) {
262 RELEASE_ASSERT(base == alignedBase);
263 allocationSize = payloadSize + 2 * osPageSize();
264 break;
265 }
266 }
267
268 if (!base) {
269 // We failed to avoid wasting virtual address space after
270 // several attempts.
271 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
272 RELEASE_ASSERT(base);
273
274 // FIXME: If base is by accident blink page size aligned
275 // here then we can create two pages out of reserved
276 // space. Do this.
277 alignedBase = roundToBlinkPageBoundary(base);
278 }
279
280 Address payloadBase = alignedBase + osPageSize();
281 PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
282 bool res = storage->commit();
283 RELEASE_ASSERT(res);
284 return storage;
285 #endif
286 }
287
288 private:
PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
290 : m_reserved(reserved)
291 , m_writable(writable)
292 {
293 ASSERT(reserved.contains(writable));
294
295 // Register the writable area of the memory as part of the LSan root set.
296 // Only the writable area is mapped and can contain C++ objects. Those
297 // C++ objects can contain pointers to objects outside of the heap and
298 // should therefore be part of the LSan root set.
299 __lsan_register_root_region(m_writable.base(), m_writable.size());
300 }
301
302 MemoryRegion m_reserved;
303 MemoryRegion m_writable;
304 };
305
306 class GCScope {
307 public:
explicit GCScope(ThreadState::StackState stackState)
309 : m_state(ThreadState::current())
310 , m_safePointScope(stackState)
311 , m_parkedAllThreads(false)
312 {
313 TRACE_EVENT0("Blink", "Heap::GCScope");
314 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
315 if (m_state->isMainThread())
316 TRACE_EVENT_SET_SAMPLING_STATE("Blink", "BlinkGCWaiting");
317
318 m_state->checkThread();
319
// FIXME: in the unlikely coincidence that two threads decide
321 // to collect garbage at the same time, avoid doing two GCs in
322 // a row.
323 RELEASE_ASSERT(!m_state->isInGC());
324 RELEASE_ASSERT(!m_state->isSweepInProgress());
325 if (LIKELY(ThreadState::stopThreads())) {
326 m_parkedAllThreads = true;
327 m_state->enterGC();
328 }
329 if (m_state->isMainThread())
330 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
331 }
332
bool allThreadsParked() { return m_parkedAllThreads; }
334
~GCScope()
336 {
// Only clean up if we parked all threads, in which case the GC happened
338 // and we need to resume the other threads.
339 if (LIKELY(m_parkedAllThreads)) {
340 m_state->leaveGC();
341 ASSERT(!m_state->isInGC());
342 ThreadState::resumeThreads();
343 }
344 }
345
346 private:
347 ThreadState* m_state;
348 ThreadState::SafePointScope m_safePointScope;
349 bool m_parkedAllThreads; // False if we fail to park all threads
350 };
351
352 NO_SANITIZE_ADDRESS
bool HeapObjectHeader::isMarked() const
354 {
355 checkHeader();
356 return m_size & markBitMask;
357 }
358
359 NO_SANITIZE_ADDRESS
void HeapObjectHeader::unmark()
361 {
362 checkHeader();
363 m_size &= ~markBitMask;
364 }
365
366 NO_SANITIZE_ADDRESS
bool HeapObjectHeader::hasDebugMark() const
368 {
369 checkHeader();
370 return m_size & debugBitMask;
371 }
372
373 NO_SANITIZE_ADDRESS
void HeapObjectHeader::clearDebugMark()
375 {
376 checkHeader();
377 m_size &= ~debugBitMask;
378 }
379
380 NO_SANITIZE_ADDRESS
void HeapObjectHeader::setDebugMark()
382 {
383 checkHeader();
384 m_size |= debugBitMask;
385 }
386
387 #ifndef NDEBUG
388 NO_SANITIZE_ADDRESS
void HeapObjectHeader::zapMagic()
390 {
391 m_magic = zappedMagic;
392 }
393 #endif
394
HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
396 {
397 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
398 HeapObjectHeader* header =
399 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
400 return header;
401 }
402
void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
404 {
405 ASSERT(gcInfo);
406 if (gcInfo->hasFinalizer()) {
407 gcInfo->m_finalize(object);
408 }
409 #if !defined(NDEBUG) || defined(LEAK_SANITIZER)
410 // Zap freed memory with a recognizable zap value in debug mode.
411 // Also zap when using leak sanitizer because the heap is used as
412 // a root region for lsan and therefore pointers in unreachable
413 // memory could hide leaks.
414 for (size_t i = 0; i < objectSize; i++)
415 object[i] = finalizedZapValue;
416 #endif
417 // Zap the primary vTable entry (secondary vTable entries are not zapped)
418 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
419 }
420
421 NO_SANITIZE_ADDRESS
void FinalizedHeapObjectHeader::finalize()
423 {
424 HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
425 }
426
427 template<typename Header>
void LargeHeapObject<Header>::unmark()
429 {
430 return heapObjectHeader()->unmark();
431 }
432
433 template<typename Header>
bool LargeHeapObject<Header>::isMarked()
435 {
436 return heapObjectHeader()->isMarked();
437 }
438
439 template<typename Header>
void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
441 {
442 ASSERT(contains(address));
443 if (!objectContains(address))
444 return;
445 #if ENABLE(GC_TRACING)
446 visitor->setHostInfo(&address, "stack");
447 #endif
448 mark(visitor);
449 }
450
451 template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
453 {
454 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload()))
455 visitor->markConservatively(heapObjectHeader());
456 else
457 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
458 }
459
460 template<>
void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
462 {
463 ASSERT(gcInfo());
464 if (gcInfo()->hasVTable() && !vTableInitialized(payload()))
465 visitor->markConservatively(heapObjectHeader());
466 else
467 visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
468 }
469
470 template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
472 {
473 heapObjectHeader()->finalize();
474 }
475
476 template<>
void LargeHeapObject<HeapObjectHeader>::finalize()
478 {
479 ASSERT(gcInfo());
480 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
481 }
482
FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
484 {
485 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
486 FinalizedHeapObjectHeader* header =
487 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
488 return header;
489 }
490
491 template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state)
493 : m_currentAllocationPoint(0)
494 , m_remainingAllocationSize(0)
495 , m_firstPage(0)
496 , m_firstLargeHeapObject(0)
497 , m_biggestFreeListIndex(0)
498 , m_threadState(state)
499 , m_pagePool(0)
500 {
501 clearFreeLists();
502 }
503
504 template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
506 {
507 clearFreeLists();
508 if (!ThreadState::current()->isMainThread())
509 assertEmpty();
510 deletePages();
511 }
512
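// Slow path of allocation: possibly trigger or request a GC, then refill the
// current allocation area from the free lists (adding a new page if needed)
// and retry the bump allocation.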
513 template<typename Header>
Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
515 {
516 size_t allocationSize = allocationSizeFromSize(size);
517 if (threadState()->shouldGC()) {
518 if (threadState()->shouldForceConservativeGC())
519 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
520 else
521 threadState()->setGCRequested();
522 }
523 ensureCurrentAllocation(allocationSize, gcInfo);
524 return allocate(size, gcInfo);
525 }
526
527 template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
529 {
530 size_t bucketSize = 1 << m_biggestFreeListIndex;
531 int i = m_biggestFreeListIndex;
532 for (; i > 0; i--, bucketSize >>= 1) {
533 if (bucketSize < minSize)
534 break;
535 FreeListEntry* entry = m_freeLists[i];
536 if (entry) {
537 m_biggestFreeListIndex = i;
538 entry->unlink(&m_freeLists[i]);
539 setAllocationPoint(entry->address(), entry->size());
540 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
541 return true;
542 }
543 }
544 m_biggestFreeListIndex = i;
545 return false;
546 }
547
548 template<typename Header>
void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
550 {
551 ASSERT(minSize >= allocationGranularity);
552 if (remainingAllocationSize() >= minSize)
553 return;
554
555 if (remainingAllocationSize() > 0)
556 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
557 if (allocateFromFreeList(minSize))
558 return;
559 addPageToHeap(gcInfo);
560 bool success = allocateFromFreeList(minSize);
561 RELEASE_ASSERT(success);
562 }
563
564 template<typename Header>
BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
566 {
567 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
568 if (page->contains(address))
569 return page;
570 }
571 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
572 // Check that large pages are blinkPageSize aligned (modulo the
573 // osPageSize for the guard page).
574 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
575 if (current->contains(address))
576 return current;
577 }
578 return 0;
579 }
580
581 #if ENABLE(GC_TRACING)
582 template<typename Header>
const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
584 {
585 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
586 if (current->contains(address))
587 return current->gcInfo();
588 }
589 return 0;
590 }
591 #endif
592
593 template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
595 {
596 ASSERT(heapPageFromAddress(address));
597 ASSERT(heapPageFromAddress(address + size - 1));
598 ASSERT(size < blinkPagePayloadSize());
599 // The free list entries are only pointer aligned (but when we allocate
600 // from them we are 8 byte aligned due to the header size).
601 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
602 ASSERT(!(size & allocationMask));
603 ASAN_POISON_MEMORY_REGION(address, size);
604 FreeListEntry* entry;
605 if (size < sizeof(*entry)) {
606 // Create a dummy header with only a size and freelist bit set.
607 ASSERT(size >= sizeof(BasicObjectHeader));
// Freelist-encode the size to mark the lost memory as freelist memory.
609 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
610 // This memory gets lost. Sweeping can reclaim it.
611 return;
612 }
613 entry = new (NotNull, address) FreeListEntry(size);
614 #if defined(ADDRESS_SANITIZER)
615 // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
616 // reaches zero. However we always add entire pages to ensure that adding a new page will
617 // increase the allocation space.
618 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
619 return;
620 #endif
621 int index = bucketIndexForSize(size);
622 entry->link(&m_freeLists[index]);
623 if (index > m_biggestFreeListIndex)
624 m_biggestFreeListIndex = index;
625 }
626
627 template<typename Header>
Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
629 {
630 // Caller already added space for object header and rounded up to allocation alignment
631 ASSERT(!(size & allocationMask));
632
633 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
634
635 // Ensure that there is enough space for alignment. If the header
636 // is not a multiple of 8 bytes we will allocate an extra
// headerPadding<Header> bytes to ensure it is 8 byte aligned.
638 allocationSize += headerPadding<Header>();
639
640 // If ASAN is supported we add allocationGranularity bytes to the allocated space and
641 // poison that to detect overflows
642 #if defined(ADDRESS_SANITIZER)
643 allocationSize += allocationGranularity;
644 #endif
645 if (threadState()->shouldGC())
646 threadState()->setGCRequested();
647 Heap::flushHeapDoesNotContainCache();
648 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
649 Address largeObjectAddress = pageMemory->writableStart();
650 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
651 memset(headerAddress, 0, size);
652 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
653 Address result = headerAddress + sizeof(*header);
654 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
655 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
656
657 // Poison the object header and allocationGranularity bytes after the object
658 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
659 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
660 largeObject->link(&m_firstLargeHeapObject);
661 stats().increaseAllocatedSpace(largeObject->size());
662 stats().increaseObjectSpace(largeObject->payloadSize());
663 return result;
664 }
665
666 template<typename Header>
void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
668 {
669 flushHeapContainsCache();
670 object->unlink(previousNext);
671 object->finalize();
672
673 // Unpoison the object header and allocationGranularity bytes after the
674 // object before freeing.
675 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
676 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
677 delete object->storage();
678 }
679
680 template<>
void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
682 {
683 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
// the heap should be unused (i.e. 0).
685 allocatePage(0);
686 }
687
688 template<>
void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
690 {
691 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
692 // since it is the same for all objects
693 ASSERT(gcInfo);
694 allocatePage(gcInfo);
695 }
696
697 template<typename Header>
void ThreadHeap<Header>::clearPagePool()
699 {
700 while (takePageFromPool()) { }
701 }
702
703 template<typename Header>
PageMemory* ThreadHeap<Header>::takePageFromPool()
705 {
706 Heap::flushHeapDoesNotContainCache();
707 while (PagePoolEntry* entry = m_pagePool) {
708 m_pagePool = entry->next();
709 PageMemory* storage = entry->storage();
710 delete entry;
711
712 if (storage->commit())
713 return storage;
714
715 // Failed to commit pooled storage. Release it.
716 delete storage;
717 }
718
719 return 0;
720 }
721
722 template<typename Header>
void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
724 {
725 flushHeapContainsCache();
726 PageMemory* storage = unused->storage();
727 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
728 m_pagePool = entry;
729 storage->decommit();
730 }
731
732 template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
734 {
735 Heap::flushHeapDoesNotContainCache();
736 PageMemory* pageMemory = takePageFromPool();
737 if (!pageMemory) {
738 pageMemory = PageMemory::allocate(blinkPagePayloadSize());
739 RELEASE_ASSERT(pageMemory);
740 }
741 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
742 // FIXME: Oilpan: Linking new pages into the front of the list is
743 // crucial when performing allocations during finalization because
744 // it ensures that those pages are not swept in the current GC
745 // round. We should create a separate page list for that to
746 // separate out the pages allocated during finalization clearly
747 // from the pages currently being swept.
748 page->link(&m_firstPage);
749 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
750 }
751
752 #ifndef NDEBUG
753 template<typename Header>
void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
755 {
756 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
757 page->getStats(scannedStats);
758 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
759 current->getStats(scannedStats);
760 }
761 #endif
762
763 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
// sweeping to catch cases where dead objects touch each other. This is not
765 // turned on by default because it also triggers for cases that are safe.
766 // Examples of such safe cases are context life cycle observers and timers
767 // embedded in garbage collected objects.
768 #define STRICT_ASAN_FINALIZATION_CHECKING 0
769
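// Sweep this heap: return completely empty pages to the page pool, sweep live
// pages in place (finalizing unmarked objects and rebuilding the free lists),
// and finalize and free unmarked large objects while unmarking the survivors.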
770 template<typename Header>
void ThreadHeap<Header>::sweep()
772 {
773 ASSERT(isConsistentForGC());
774 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
775 // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
// calling their finalizer methods. This can catch the cases where one object's
777 // finalizer tries to modify another object as part of finalization.
778 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
779 page->poisonUnmarkedObjects();
780 #endif
781 HeapPage<Header>* page = m_firstPage;
782 HeapPage<Header>** previous = &m_firstPage;
783 bool pagesRemoved = false;
784 while (page) {
785 if (page->isEmpty()) {
786 flushHeapContainsCache();
787 HeapPage<Header>* unused = page;
788 page = page->next();
789 HeapPage<Header>::unlink(unused, previous);
790 pagesRemoved = true;
791 } else {
792 page->sweep();
793 previous = &page->m_next;
794 page = page->next();
795 }
796 }
797 if (pagesRemoved)
798 flushHeapContainsCache();
799
800 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
801 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
802 if (current->isMarked()) {
803 stats().increaseAllocatedSpace(current->size());
804 stats().increaseObjectSpace(current->payloadSize());
805 current->unmark();
previousNext = &current->m_next;
807 current = current->next();
808 } else {
809 LargeHeapObject<Header>* next = current->next();
810 freeLargeObject(current, previousNext);
811 current = next;
812 }
813 }
814 }
815
816 template<typename Header>
void ThreadHeap<Header>::assertEmpty()
818 {
819 // No allocations are permitted. The thread is exiting.
820 NoAllocationScope<AnyThread> noAllocation;
821 makeConsistentForGC();
822 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
823 Address end = page->end();
824 Address headerAddress;
825 for (headerAddress = page->payload(); headerAddress < end; ) {
826 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
827 ASSERT(basicHeader->size() < blinkPagePayloadSize());
828 // A live object is potentially a dangling pointer from
829 // some root. Treat that as a bug. Unfortunately, it is
830 // hard to reliably check in the presence of conservative
831 // stack scanning. Something could be conservatively kept
832 // alive because a non-pointer on another thread's stack
833 // is treated as a pointer into the heap.
834 //
835 // FIXME: This assert can currently trigger in cases where
836 // worker shutdown does not get enough precise GCs to get
837 // all objects removed from the worker heap. There are two
838 // issues: 1) conservative GCs keeping objects alive, and
839 // 2) long chains of RefPtrs/Persistents that require more
840 // GCs to get everything cleaned up. Maybe we can keep
841 // threads alive until their heaps become empty instead of
842 // forcing the threads to die immediately?
843 ASSERT(Heap::lastGCWasConservative() || basicHeader->isFree());
844 if (basicHeader->isFree())
845 addToFreeList(headerAddress, basicHeader->size());
846 headerAddress += basicHeader->size();
847 }
848 ASSERT(headerAddress == end);
849 }
850
851 ASSERT(Heap::lastGCWasConservative() || !m_firstLargeHeapObject);
852 }
853
854 template<typename Header>
bool ThreadHeap<Header>::isConsistentForGC()
856 {
857 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
858 if (m_freeLists[i])
859 return false;
860 }
861 return !ownsNonEmptyAllocationArea();
862 }
863
864 template<typename Header>
void ThreadHeap<Header>::makeConsistentForGC()
866 {
867 if (ownsNonEmptyAllocationArea())
868 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
869 setAllocationPoint(0, 0);
870 clearFreeLists();
871 }
872
873 template<typename Header>
void ThreadHeap<Header>::clearMarks()
875 {
876 ASSERT(isConsistentForGC());
877 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
878 page->clearMarks();
879 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
880 current->unmark();
881 }
882
883 template<typename Header>
void ThreadHeap<Header>::deletePages()
885 {
886 flushHeapContainsCache();
887 // Add all pages in the pool to the heap's list of pages before deleting
888 clearPagePool();
889
890 for (HeapPage<Header>* page = m_firstPage; page; ) {
891 HeapPage<Header>* dead = page;
892 page = page->next();
893 PageMemory* storage = dead->storage();
894 dead->~HeapPage();
895 delete storage;
896 }
897 m_firstPage = 0;
898
899 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
900 LargeHeapObject<Header>* dead = current;
901 current = current->next();
902 PageMemory* storage = dead->storage();
903 dead->~LargeHeapObject();
904 delete storage;
905 }
906 m_firstLargeHeapObject = 0;
907 }
908
909 template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
911 {
912 for (size_t i = 0; i < blinkPageSizeLog2; i++)
913 m_freeLists[i] = 0;
914 }
915
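// Returns the power-of-two free list bucket for a chunk of the given size,
// i.e. floor(log2(size)). For example, a 48 byte chunk lands in bucket 5
// since 2^5 = 32 <= 48 < 64.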
int BaseHeap::bucketIndexForSize(size_t size)
917 {
918 ASSERT(size > 0);
919 int index = -1;
920 while (size) {
921 size >>= 1;
922 index++;
923 }
924 return index;
925 }
926
927 template<typename Header>
HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
929 : BaseHeapPage(storage, gcInfo, heap->threadState())
930 , m_next(0)
931 , m_heap(heap)
932 {
933 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
934 m_objectStartBitMapComputed = false;
935 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
936 heap->stats().increaseAllocatedSpace(blinkPageSize);
937 }
938
939 template<typename Header>
void HeapPage<Header>::link(HeapPage** prevNext)
941 {
942 m_next = *prevNext;
943 *prevNext = this;
944 }
945
946 template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
948 {
949 *prevNext = unused->m_next;
950 unused->heap()->addPageToPool(unused);
951 }
952
953 template<typename Header>
void HeapPage<Header>::getStats(HeapStats& stats)
955 {
956 stats.increaseAllocatedSpace(blinkPageSize);
957 Address headerAddress = payload();
958 ASSERT(headerAddress != end());
959 do {
960 Header* header = reinterpret_cast<Header*>(headerAddress);
961 if (!header->isFree())
962 stats.increaseObjectSpace(header->payloadSize());
963 ASSERT(header->size() < blinkPagePayloadSize());
964 headerAddress += header->size();
965 ASSERT(headerAddress <= end());
966 } while (headerAddress < end());
967 }
968
969 template<typename Header>
bool HeapPage<Header>::isEmpty()
971 {
972 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
973 return header->isFree() && (header->size() == payloadSize());
974 }
975
976 template<typename Header>
void HeapPage<Header>::sweep()
978 {
979 clearObjectStartBitMap();
980 heap()->stats().increaseAllocatedSpace(blinkPageSize);
981 Address startOfGap = payload();
982 for (Address headerAddress = startOfGap; headerAddress < end(); ) {
983 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
984 ASSERT(basicHeader->size() < blinkPagePayloadSize());
985
986 if (basicHeader->isFree()) {
987 headerAddress += basicHeader->size();
988 continue;
989 }
990 // At this point we know this is a valid object of type Header
991 Header* header = static_cast<Header*>(basicHeader);
992
993 if (!header->isMarked()) {
994 // For ASAN we unpoison the specific object when calling the finalizer and
// poison it again when done. This allows the object's own finalizer to operate
// on the object while other finalizers are still not allowed to access it.
997 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
998 finalize(header);
999 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1000 headerAddress += header->size();
1001 continue;
1002 }
1003
1004 if (startOfGap != headerAddress)
1005 heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
1006 header->unmark();
1007 headerAddress += header->size();
1008 heap()->stats().increaseObjectSpace(header->payloadSize());
1009 startOfGap = headerAddress;
1010 }
1011 if (startOfGap != end())
1012 heap()->addToFreeList(startOfGap, end() - startOfGap);
1013 }
1014
1015 template<typename Header>
void HeapPage<Header>::clearMarks()
1017 {
1018 for (Address headerAddress = payload(); headerAddress < end();) {
1019 Header* header = reinterpret_cast<Header*>(headerAddress);
1020 ASSERT(header->size() < blinkPagePayloadSize());
1021 if (!header->isFree())
1022 header->unmark();
1023 headerAddress += header->size();
1024 }
1025 }
1026
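// Build the object start bitmap by walking the headers on this page. Each bit
// corresponds to one allocationGranularity-sized slot of the payload and is
// set when an object or free list entry header starts at that slot.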
1027 template<typename Header>
void HeapPage<Header>::populateObjectStartBitMap()
1029 {
1030 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1031 Address start = payload();
1032 for (Address headerAddress = start; headerAddress < end();) {
1033 Header* header = reinterpret_cast<Header*>(headerAddress);
1034 size_t objectOffset = headerAddress - start;
1035 ASSERT(!(objectOffset & allocationMask));
1036 size_t objectStartNumber = objectOffset / allocationGranularity;
1037 size_t mapIndex = objectStartNumber / 8;
1038 ASSERT(mapIndex < objectStartBitMapSize);
1039 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1040 headerAddress += header->size();
1041 ASSERT(headerAddress <= end());
1042 }
1043 m_objectStartBitMapComputed = true;
1044 }
1045
1046 template<typename Header>
void HeapPage<Header>::clearObjectStartBitMap()
1048 {
1049 m_objectStartBitMapComputed = false;
1050 }
1051
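// Counts the leading zero bits in a byte using a small binary search, e.g.
// 0x20 (0b00100000) has two leading zeroes. Used below to locate the most
// significant set bit in an object start bitmap byte.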
static int numberOfLeadingZeroes(uint8_t byte)
1053 {
1054 if (!byte)
1055 return 8;
1056 int result = 0;
1057 if (byte <= 0x0F) {
1058 result += 4;
1059 byte = byte << 4;
1060 }
1061 if (byte <= 0x3F) {
1062 result += 2;
1063 byte = byte << 2;
1064 }
1065 if (byte <= 0x7F)
1066 result++;
1067 return result;
1068 }
1069
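// Maps an interior address to the header of the object containing it by
// scanning the object start bitmap backwards from the address. Returns 0 if
// the address is before the payload or points into a free chunk.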
1070 template<typename Header>
Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1072 {
1073 if (address < payload())
1074 return 0;
1075 if (!isObjectStartBitMapComputed())
1076 populateObjectStartBitMap();
1077 size_t objectOffset = address - payload();
1078 size_t objectStartNumber = objectOffset / allocationGranularity;
1079 size_t mapIndex = objectStartNumber / 8;
1080 ASSERT(mapIndex < objectStartBitMapSize);
1081 size_t bit = objectStartNumber & 7;
1082 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1083 while (!byte) {
1084 ASSERT(mapIndex > 0);
1085 byte = m_objectStartBitMap[--mapIndex];
1086 }
1087 int leadingZeroes = numberOfLeadingZeroes(byte);
1088 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1089 objectOffset = objectStartNumber * allocationGranularity;
1090 Address objectAddress = objectOffset + payload();
1091 Header* header = reinterpret_cast<Header*>(objectAddress);
1092 if (header->isFree())
1093 return 0;
1094 return header;
1095 }
1096
1097 template<typename Header>
void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1099 {
1100 ASSERT(contains(address));
1101 Header* header = findHeaderFromAddress(address);
1102 if (!header)
1103 return;
1104
1105 #if ENABLE(GC_TRACING)
1106 visitor->setHostInfo(&address, "stack");
1107 #endif
1108 if (hasVTable(header) && !vTableInitialized(header->payload()))
1109 visitor->markConservatively(header);
1110 else
1111 visitor->mark(header, traceCallback(header));
1112 }
1113
1114 #if ENABLE(GC_TRACING)
1115 template<typename Header>
const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1117 {
1118 if (address < payload())
1119 return 0;
1120
1121 if (gcInfo()) // for non FinalizedObjectHeader
1122 return gcInfo();
1123
1124 Header* header = findHeaderFromAddress(address);
1125 if (!header)
1126 return 0;
1127
1128 return header->gcInfo();
1129 }
1130 #endif
1131
1132 #if defined(ADDRESS_SANITIZER)
1133 template<typename Header>
void HeapPage<Header>::poisonUnmarkedObjects()
1135 {
1136 for (Address headerAddress = payload(); headerAddress < end(); ) {
1137 Header* header = reinterpret_cast<Header*>(headerAddress);
1138 ASSERT(header->size() < blinkPagePayloadSize());
1139
1140 if (!header->isFree() && !header->isMarked())
1141 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1142 headerAddress += header->size();
1143 }
1144 }
1145 #endif
1146
1147 template<>
inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1149 {
1150 header->finalize();
1151 }
1152
1153 template<>
inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1155 {
1156 ASSERT(gcInfo());
1157 HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1158 }
1159
1160 template<>
inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1162 {
1163 ASSERT(gcInfo());
1164 return gcInfo()->m_trace;
1165 }
1166
1167 template<>
inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1169 {
1170 return header->traceCallback();
1171 }
1172
1173 template<>
inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1175 {
1176 ASSERT(gcInfo());
1177 return gcInfo()->hasVTable();
1178 }
1179
1180 template<>
inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1182 {
1183 return header->hasVTable();
1184 }
1185
1186 template<typename Header>
void LargeHeapObject<Header>::getStats(HeapStats& stats)
1188 {
1189 stats.increaseAllocatedSpace(size());
1190 stats.increaseObjectSpace(payloadSize());
1191 }
1192
1193 template<typename Entry>
void HeapExtentCache<Entry>::flush()
1195 {
1196 if (m_hasEntries) {
1197 for (int i = 0; i < numberOfEntries; i++)
1198 m_entries[i] = Entry();
1199 m_hasEntries = false;
1200 }
1201 }
1202
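// Hash the Blink page containing the address to an even table index. Together
// with lookup() and addEntry() below, this makes each bucket a two-entry
// (2-way associative) slot consisting of the even index and the one after it.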
1203 template<typename Entry>
size_t HeapExtentCache<Entry>::hash(Address address)
1205 {
1206 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1207 value ^= value >> numberOfEntriesLog2;
1208 value ^= value >> (numberOfEntriesLog2 * 2);
1209 value &= numberOfEntries - 1;
1210 return value & ~1; // Returns only even number.
1211 }
1212
1213 template<typename Entry>
typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1215 {
1216 size_t index = hash(address);
1217 ASSERT(!(index & 1));
1218 Address cachePage = roundToBlinkPageStart(address);
1219 if (m_entries[index].address() == cachePage)
1220 return m_entries[index].result();
1221 if (m_entries[index + 1].address() == cachePage)
1222 return m_entries[index + 1].result();
1223 return 0;
1224 }
1225
1226 template<typename Entry>
void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry)
1228 {
1229 m_hasEntries = true;
1230 size_t index = hash(address);
1231 ASSERT(!(index & 1));
1232 Address cachePage = roundToBlinkPageStart(address);
1233 m_entries[index + 1] = m_entries[index];
1234 m_entries[index] = Entry(cachePage, entry);
1235 }
1236
1237 // These should not be needed, but it seems impossible to persuade clang to
1238 // instantiate the template functions and export them from a shared library, so
1239 // we add these in the non-templated subclass, which does not have that issue.
void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1241 {
1242 HeapExtentCache<PositiveEntry>::addEntry(address, page);
1243 }
1244
BaseHeapPage* HeapContainsCache::lookup(Address address)
1246 {
1247 return HeapExtentCache<PositiveEntry>::lookup(address);
1248 }
1249
void Heap::flushHeapDoesNotContainCache()
1251 {
1252 s_heapDoesNotContainCache->flush();
1253 }
1254
void CallbackStack::init(CallbackStack** first)
1256 {
1257 // The stacks are chained, so we start by setting this to null as terminator.
1258 *first = 0;
1259 *first = new CallbackStack(first);
1260 }
1261
void CallbackStack::shutdown(CallbackStack** first)
1263 {
1264 CallbackStack* next;
1265 for (CallbackStack* current = *first; current; current = next) {
1266 next = current->m_next;
1267 delete current;
1268 }
1269 *first = 0;
1270 }
1271
CallbackStack::~CallbackStack()
1273 {
1274 #ifndef NDEBUG
1275 clearUnused();
1276 #endif
1277 }
1278
void CallbackStack::clearUnused()
1280 {
1281 ASSERT(m_current == &(m_buffer[0]));
1282 for (size_t i = 0; i < bufferSize; i++)
1283 m_buffer[i] = Item(0, 0);
1284 }
1285
void CallbackStack::assertIsEmpty()
1287 {
1288 ASSERT(m_current == &(m_buffer[0]));
1289 ASSERT(!m_next);
1290 }
1291
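// Pop the top item and invoke its callback. When the current block is empty,
// unlink and delete it and continue with the next chained block; returns
// false once the whole stack has been drained.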
bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
1293 {
1294 if (m_current == &(m_buffer[0])) {
1295 if (!m_next) {
1296 #ifndef NDEBUG
1297 clearUnused();
1298 #endif
1299 return false;
1300 }
1301 CallbackStack* nextStack = m_next;
1302 *first = nextStack;
1303 delete this;
1304 return nextStack->popAndInvokeCallback(first, visitor);
1305 }
1306 Item* item = --m_current;
1307
1308 VisitorCallback callback = item->callback();
1309 #if ENABLE(GC_TRACING)
1310 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback
1311 visitor->setHostInfo(item->object(), classOf(item->object()));
1312 #endif
1313 callback(visitor, item->object());
1314
1315 return true;
1316 }
1317
1318 class MarkingVisitor : public Visitor {
1319 public:
1320 #if ENABLE(GC_TRACING)
1321 typedef HashSet<uintptr_t> LiveObjectSet;
1322 typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1323 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1324 #endif
1325
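// Mark the object if it is not already marked and, if it has a trace
// callback, push it onto the marking stack so its fields are traced later.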
inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1327 {
1328 ASSERT(header);
1329 ASSERT(objectPointer);
1330 if (header->isMarked())
1331 return;
1332 header->mark();
1333 #if ENABLE(GC_TRACING)
1334 MutexLocker locker(objectGraphMutex());
1335 String className(classOf(objectPointer));
1336 {
1337 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1338 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1339 }
1340 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1341 ASSERT(result.isNewEntry);
1342 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1343 #endif
1344 if (callback)
1345 Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
1346 }
1347
virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
1349 {
1350 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1351 // version to correctly find the payload.
1352 visitHeader(header, header->payload(), callback);
1353 }
1354
virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
1356 {
1357 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1358 // version to correctly find the payload.
1359 visitHeader(header, header->payload(), callback);
1360 }
1361
virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
1363 {
1364 if (!objectPointer)
1365 return;
1366 FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1367 visitHeader(header, header->payload(), callback);
1368 }
1369
1370
inline void visitConservatively(HeapObjectHeader* header, void* objectPointer, size_t objectSize)
1372 {
1373 ASSERT(header);
1374 ASSERT(objectPointer);
1375 if (header->isMarked())
1376 return;
1377 header->mark();
1378
1379 // Scan through the object's fields and visit them conservatively.
1380 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
1381 for (size_t i = 0; i < objectSize / sizeof(Address); ++i)
1382 Heap::checkAndMarkPointer(this, objectFields[i]);
1383 }
1384
virtual void markConservatively(HeapObjectHeader* header)
1386 {
1387 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1388 // version to correctly find the payload.
1389 visitConservatively(header, header->payload(), header->payloadSize());
1390 }
1391
virtual void markConservatively(FinalizedHeapObjectHeader* header)
1393 {
1394 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1395 // version to correctly find the payload.
1396 visitConservatively(header, header->payload(), header->payloadSize());
1397 }
1398
virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
1400 {
1401 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
1402 }
1403
virtual bool isMarked(const void* objectPointer) OVERRIDE
1405 {
1406 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
1407 }
1408
1409 // This macro defines the necessary visitor methods for typed heaps
1410 #define DEFINE_VISITOR_METHODS(Type) \
1411 virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
1412 { \
1413 if (!objectPointer) \
1414 return; \
1415 HeapObjectHeader* header = \
1416 HeapObjectHeader::fromPayload(objectPointer); \
1417 visitHeader(header, header->payload(), callback); \
1418 } \
1419 virtual bool isMarked(const Type* objectPointer) OVERRIDE \
1420 { \
1421 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
1422 }
1423
FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1425 #undef DEFINE_VISITOR_METHODS
1426
1427 #if ENABLE(GC_TRACING)
1428 void reportStats()
1429 {
1430 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n");
1431 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
1432 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size());
1433
1434 if (it->key == "WebCore::Document")
1435 reportStillAlive(it->value, previouslyLive().get(it->key));
1436
1437 fprintf(stderr, "\n");
1438 }
1439
1440 previouslyLive().swap(currentlyLive());
1441 currentlyLive().clear();
1442
1443 for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
1444 dumpPathToObjectFromObjectGraph(objectGraph(), *it);
1445 }
1446 }
1447
static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
1449 {
1450 int count = 0;
1451
1452 fprintf(stderr, " [previously %u]", previous.size());
1453 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1454 if (previous.find(*it) == previous.end())
1455 continue;
1456 count++;
1457 }
1458
1459 if (!count)
1460 return;
1461
1462 fprintf(stderr, " {survived 2GCs %d: ", count);
1463 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1464 if (previous.find(*it) == previous.end())
1465 continue;
1466 fprintf(stderr, "%ld", *it);
1467 if (--count)
1468 fprintf(stderr, ", ");
1469 }
1470 ASSERT(!count);
1471 fprintf(stderr, "}");
1472 }
1473
static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
1475 {
1476 ObjectGraph::const_iterator it = graph.find(target);
1477 if (it == graph.end())
1478 return;
1479 fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
1480 while (it != graph.end()) {
1481 fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data());
1482 it = graph.find(it->value.first);
1483 }
1484 fprintf(stderr, "\n");
1485 }
1486
static void dumpPathToObjectOnNextGC(void* p)
1488 {
1489 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
1490 }
1491
static Mutex& objectGraphMutex()
1493 {
1494 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
1495 return mutex;
1496 }
1497
static LiveObjectMap& previouslyLive()
1499 {
1500 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
1501 return map;
1502 }
1503
static LiveObjectMap& currentlyLive()
1505 {
1506 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
1507 return map;
1508 }
1509
static ObjectGraph& objectGraph()
1511 {
1512 DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
1513 return graph;
1514 }
1515
static HashSet<uintptr_t>& objectsToFindPath()
1517 {
1518 DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
1519 return set;
1520 }
1521 #endif
1522
1523 protected:
virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
1525 {
1526 Heap::pushWeakCellPointerCallback(cell, callback);
1527 }
1528 };
1529
void Heap::init()
1531 {
1532 ThreadState::init();
1533 CallbackStack::init(&s_markingStack);
1534 CallbackStack::init(&s_weakCallbackStack);
1535 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
1536 s_markingVisitor = new MarkingVisitor();
1537 }
1538
void Heap::shutdown()
1540 {
1541 s_shutdownCalled = true;
1542 ThreadState::shutdownHeapIfNecessary();
1543 }
1544
void Heap::doShutdown()
1546 {
1547 // We don't want to call doShutdown() twice.
1548 if (!s_markingVisitor)
1549 return;
1550
1551 ASSERT(!ThreadState::isAnyThreadInGC());
1552 ASSERT(!ThreadState::attachedThreads().size());
1553 delete s_markingVisitor;
1554 s_markingVisitor = 0;
1555 delete s_heapDoesNotContainCache;
1556 s_heapDoesNotContainCache = 0;
1557 CallbackStack::shutdown(&s_weakCallbackStack);
1558 CallbackStack::shutdown(&s_markingStack);
1559 ThreadState::shutdown();
1560 }
1561
BaseHeapPage* Heap::contains(Address address)
1563 {
1564 ASSERT(ThreadState::isAnyThreadInGC());
1565 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1566 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1567 BaseHeapPage* page = (*it)->contains(address);
1568 if (page)
1569 return page;
1570 }
1571 return 0;
1572 }
1573
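// Conservatively check whether the address points into any attached thread's
// heap and, if so, mark the containing object. Misses are remembered in the
// negative "heap does not contain" cache so repeated values are rejected cheaply.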
Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1575 {
1576 ASSERT(ThreadState::isAnyThreadInGC());
1577
1578 #ifdef NDEBUG
1579 if (s_heapDoesNotContainCache->lookup(address))
1580 return 0;
1581 #endif
1582
1583 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1584 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1585 if ((*it)->checkAndMarkPointer(visitor, address)) {
1586 // Pointer was in a page of that thread. If it actually pointed
1587 // into an object then that object was found and marked.
1588 ASSERT(!s_heapDoesNotContainCache->lookup(address));
1589 s_lastGCWasConservative = true;
1590 return address;
1591 }
1592 }
1593
1594 #ifdef NDEBUG
1595 s_heapDoesNotContainCache->addEntry(address, true);
1596 #else
1597 if (!s_heapDoesNotContainCache->lookup(address))
1598 s_heapDoesNotContainCache->addEntry(address, true);
1599 #endif
1600 return 0;
1601 }
1602
1603 #if ENABLE(GC_TRACING)
const GCInfo* Heap::findGCInfo(Address address)
1605 {
1606 return ThreadState::findGCInfoFromAllThreads(address);
1607 }
1608
void Heap::dumpPathToObjectOnNextGC(void* p)
1610 {
1611 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
1612 }
1613
String Heap::createBacktraceString()
1615 {
1616 int framesToShow = 3;
1617 int stackFrameSize = 16;
1618 ASSERT(stackFrameSize >= framesToShow);
1619 typedef void* FramePointer;
1620 FramePointer* stackFrame = static_cast<FramePointer*>(alloca(sizeof(FramePointer) * stackFrameSize));
1621 WTFGetBacktrace(stackFrame, &stackFrameSize);
1622
1623 StringBuilder builder;
1624 builder.append("Persistent");
1625 bool didAppendFirstName = false;
1626 // Skip frames before/including "WebCore::Persistent".
1627 bool didSeePersistent = false;
1628 for (int i = 0; i < stackFrameSize && framesToShow > 0; ++i) {
1629 FrameToNameScope frameToName(stackFrame[i]);
1630 if (!frameToName.nullableName())
1631 continue;
1632 if (strstr(frameToName.nullableName(), "WebCore::Persistent")) {
1633 didSeePersistent = true;
1634 continue;
1635 }
1636 if (!didSeePersistent)
1637 continue;
1638 if (!didAppendFirstName) {
1639 didAppendFirstName = true;
1640 builder.append(" ... Backtrace:");
1641 }
1642 builder.append("\n\t");
1643 builder.append(frameToName.nullableName());
1644 --framesToShow;
1645 }
1646 return builder.toString().replace("WebCore::", "");
1647 }
1648 #endif
1649
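// Queues an (object, trace callback) pair on the global marking stack; the
// entries are drained by popAndInvokeTraceCallback() below during marking.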
void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
    *slot = CallbackStack::Item(object, callback);
}

bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
    return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
}

void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(cell));
    CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
    *slot = CallbackStack::Item(cell, callback);
}

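// Unlike cell callbacks, which go on the global weak-callback stack,
// object-bound weak callbacks are queued on the ThreadState owning the
// object's page (presumably so the owning thread can run them as part of
// its own weak processing).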
void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(object));
    BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object)));
    ASSERT(Heap::contains(object) == heapPageForObject);
    ThreadState* state = heapPageForObject->threadState();
    state->pushWeakObjectPointerCallback(closure, callback);
}

bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
    return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
}

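// Asks every attached thread to get its heaps ready for marking.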
void Heap::prepareForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->prepareForGC();
}

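// Performs a garbage collection:
//  1. park all attached threads (bail out and re-request the GC if that fails);
//  2. prepare the thread heaps and mark everything reachable from the roots,
//     draining the marking stack;
//  3. run weak-pointer callbacks so weak references to dead objects get cleared;
//  4. report timing and heap-size histograms.
// Finalization is not done here; it happens when the GCScope is torn down
// (see the NoAllocationScope comment below).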
void Heap::collectGarbage(ThreadState::StackState stackState)
{
    ThreadState* state = ThreadState::current();
    state->clearGCRequested();

    GCScope gcScope(stackState);
    // Check if we successfully parked the other threads. If not, we bail out of the GC.
    if (!gcScope.allThreadsParked()) {
        ThreadState::current()->setGCRequested();
        return;
    }

    s_lastGCWasConservative = false;

    TRACE_EVENT0("Blink", "Heap::collectGarbage");
    TRACE_EVENT_SCOPED_SAMPLING_STATE("Blink", "BlinkGC");
    double timeStamp = WTF::currentTimeMS();
#if ENABLE(GC_TRACING)
    static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
#endif

    // Disallow allocation during garbage collection (but not
    // during the finalization that happens when the gcScope is
    // torn down).
    NoAllocationScope<AnyThread> noAllocationScope;

    prepareForGC();

    ThreadState::visitRoots(s_markingVisitor);
    // Recursively mark all objects that are reachable from the roots.
    while (popAndInvokeTraceCallback(s_markingVisitor)) { }

    // Call weak callbacks on objects that may now be pointing to dead
    // objects.
    while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }

    // It is not permitted to trace pointers of live objects in the weak
    // callback phase, so the marking stack should still be empty here.
    s_markingStack->assertIsEmpty();

#if ENABLE(GC_TRACING)
    static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
#endif

    if (blink::Platform::current()) {
        uint64_t objectSpaceSize;
        uint64_t allocatedSpaceSize;
        getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
        blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
        blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
        blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
    }
}

void Heap::collectAllGarbage()
{
    // FIXME: oilpan: we should perform a single GC and everything
    // should die. Unfortunately this is not yet the case for all objects,
    // because the hierarchy has not been completely moved to the heap and
    // some heap-allocated objects own objects that contain persistents
    // pointing to other heap-allocated objects.
    for (int i = 0; i < 5; i++)
        collectGarbage(ThreadState::NoHeapPointersOnStack);
}

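// Testing hook: asks the current thread to force a precise GC (see
// ThreadState::setForcePreciseGCForTesting()).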
void Heap::setForcePreciseGCForTesting()
{
    ThreadState::current()->setForcePreciseGCForTesting(true);
}

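// Sums each attached thread's totalObjectSpace() and totalAllocatedSpace();
// collectGarbage() uses the result to feed the BlinkGC.* histograms.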
void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
{
    *objectSpaceSize = 0;
    *allocatedSpaceSize = 0;
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        *objectSpaceSize += (*it)->stats().totalObjectSpace();
        *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
    }
}

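// Aggregates the per-thread HeapStats of all attached threads into |stats|.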
void Heap::getStats(HeapStats* stats)
{
    stats->clear();
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        HeapStats temp;
        (*it)->getStats(temp);
        stats->add(&temp);
    }
}

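// The two helpers below check, and respectively establish, the per-thread
// heap consistency that collection relies on, by delegating to every
// attached ThreadState.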
bool Heap::isConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if (!(*it)->isConsistentForGC())
            return false;
    }
    return true;
}

void Heap::makeConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->makeConsistentForGC();
}

// Force template instantiations for the types that we need.
template class HeapPage<FinalizedHeapObjectHeader>;
template class HeapPage<HeapObjectHeader>;
template class ThreadHeap<FinalizedHeapObjectHeader>;
template class ThreadHeap<HeapObjectHeader>;

Visitor* Heap::s_markingVisitor;
CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_weakCallbackStack;
HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
bool Heap::s_shutdownCalled = false;
bool Heap::s_lastGCWasConservative = false;
}
