1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "config.h"
32 #include "platform/heap/Heap.h"
33
34 #include "platform/ScriptForbiddenScope.h"
35 #include "platform/Task.h"
36 #include "platform/TraceEvent.h"
37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/ThreadState.h"
39 #include "public/platform/Platform.h"
40 #include "wtf/AddressSpaceRandomization.h"
41 #include "wtf/Assertions.h"
42 #include "wtf/LeakAnnotations.h"
43 #include "wtf/PassOwnPtr.h"
44 #if ENABLE(GC_PROFILE_MARKING)
45 #include "wtf/HashMap.h"
46 #include "wtf/HashSet.h"
47 #include "wtf/text/StringBuilder.h"
48 #include "wtf/text/StringHash.h"
49 #include <stdio.h>
50 #include <utility>
51 #endif
52 #if ENABLE(GC_PROFILE_HEAP)
53 #include "platform/TracedValue.h"
54 #endif
55
56 #if OS(POSIX)
57 #include <sys/mman.h>
58 #include <unistd.h>
59 #elif OS(WIN)
60 #include <windows.h>
61 #endif
62
63 namespace blink {
64
65 #if ENABLE(GC_PROFILE_MARKING)
66 static String classOf(const void* object)
67 {
68 const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
69 if (gcInfo)
70 return gcInfo->m_className;
71
72 return "unknown";
73 }
74 #endif
75
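// Returns true if the first word of the object (the vtable pointer for
// objects with a vtable) has been written, i.e. is non-null.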
76 static bool vTableInitialized(void* objectPointer)
77 {
78 return !!(*reinterpret_cast<Address*>(objectPointer));
79 }
80
81 #if OS(WIN)
82 static bool IsPowerOf2(size_t power)
83 {
84 return !((power - 1) & power);
85 }
86 #endif
87
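// Round an address up to the next Blink page boundary (identity if the
// address is already Blink page aligned).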
88 static Address roundToBlinkPageBoundary(void* base)
89 {
90 return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
91 }
92
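// Round a size up to a multiple of the OS page size.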
93 static size_t roundToOsPageSize(size_t size)
94 {
95 return (size + osPageSize() - 1) & ~(osPageSize() - 1);
96 }
97
98 size_t osPageSize()
99 {
100 #if OS(POSIX)
101 static const size_t pageSize = getpagesize();
102 #else
103 static size_t pageSize = 0;
104 if (!pageSize) {
105 SYSTEM_INFO info;
106 GetSystemInfo(&info);
107 pageSize = info.dwPageSize;
108 ASSERT(IsPowerOf2(pageSize));
109 }
110 #endif
111 return pageSize;
112 }
113
114 class MemoryRegion {
115 public:
116 MemoryRegion(Address base, size_t size)
117 : m_base(base)
118 , m_size(size)
119 {
120 ASSERT(size > 0);
121 }
122
123 bool contains(Address addr) const
124 {
125 return m_base <= addr && addr < (m_base + m_size);
126 }
127
128
129 bool contains(const MemoryRegion& other) const
130 {
131 return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
132 }
133
134 void release()
135 {
136 #if OS(POSIX)
137 int err = munmap(m_base, m_size);
138 RELEASE_ASSERT(!err);
139 #else
140 bool success = VirtualFree(m_base, 0, MEM_RELEASE);
141 RELEASE_ASSERT(success);
142 #endif
143 }
144
145 WARN_UNUSED_RETURN bool commit()
146 {
147 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
148 #if OS(POSIX)
149 int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
150 if (!err) {
151 madvise(m_base, m_size, MADV_NORMAL);
152 return true;
153 }
154 return false;
155 #else
156 void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
157 return !!result;
158 #endif
159 }
160
161 void decommit()
162 {
163 #if OS(POSIX)
164 int err = mprotect(m_base, m_size, PROT_NONE);
165 RELEASE_ASSERT(!err);
166 // FIXME: Consider using MADV_FREE on MacOS.
167 madvise(m_base, m_size, MADV_DONTNEED);
168 #else
169 bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
170 RELEASE_ASSERT(success);
171 #endif
172 }
173
174 Address base() const { return m_base; }
175 size_t size() const { return m_size; }
176
177 private:
178 Address m_base;
179 size_t m_size;
180 };
181
182 // A PageMemoryRegion represents a chunk of reserved virtual address
183 // space containing a number of blink heap pages. On Windows, reserved
184 // virtual address space can only be given back to the system as a
185 // whole. The PageMemoryRegion allows us to do that by keeping track
186 // of the number of pages using it in order to be able to release all
187 // of the virtual address space when there are no more pages using it.
188 class PageMemoryRegion : public MemoryRegion {
189 public:
190 ~PageMemoryRegion()
191 {
192 release();
193 }
194
195 void pageRemoved()
196 {
197 if (!--m_numPages)
198 delete this;
199 }
200
201 static PageMemoryRegion* allocate(size_t size, unsigned numPages)
202 {
203 ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
204
205 // Compute a random blink page aligned address for the page memory
206 // region and attempt to get the memory there.
207 Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
208 Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
209
210 #if OS(POSIX)
211 Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
212 RELEASE_ASSERT(base != MAP_FAILED);
213 if (base == roundToBlinkPageBoundary(base))
214 return new PageMemoryRegion(base, size, numPages);
215
216 // We failed to get a blink page aligned chunk of
217 // memory. Unmap the chunk that we got and fall back to
218 // overallocating and selecting an aligned sub part of what
219 // we allocate.
220 int error = munmap(base, size);
221 RELEASE_ASSERT(!error);
222 size_t allocationSize = size + blinkPageSize;
223 base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
224 RELEASE_ASSERT(base != MAP_FAILED);
225
226 Address end = base + allocationSize;
227 Address alignedBase = roundToBlinkPageBoundary(base);
228 Address regionEnd = alignedBase + size;
229
230 // If the allocated memory was not blink page aligned release
231 // the memory before the aligned address.
232 if (alignedBase != base)
233 MemoryRegion(base, alignedBase - base).release();
234
235 // Free the additional memory at the end of the page if any.
236 if (regionEnd < end)
237 MemoryRegion(regionEnd, end - regionEnd).release();
238
239 return new PageMemoryRegion(alignedBase, size, numPages);
240 #else
241 Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS));
242 if (base) {
243 ASSERT(base == alignedRandomAddress);
244 return new PageMemoryRegion(base, size, numPages);
245 }
246
247 // We failed to get the random aligned address that we asked
248 // for. Fall back to overallocating. On Windows it is
249 // impossible to partially release a region of memory
250 // allocated by VirtualAlloc. To avoid wasting virtual address
251 // space we attempt to release a large region of memory
252 // returned as a whole and then allocate an aligned region
253 // inside this larger region.
254 size_t allocationSize = size + blinkPageSize;
255 for (int attempt = 0; attempt < 3; attempt++) {
256 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
257 RELEASE_ASSERT(base);
258 VirtualFree(base, 0, MEM_RELEASE);
259
260 Address alignedBase = roundToBlinkPageBoundary(base);
261 base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS));
262 if (base) {
263 ASSERT(base == alignedBase);
264 return new PageMemoryRegion(alignedBase, size, numPages);
265 }
266 }
267
268 // We failed to avoid wasting virtual address space after
269 // several attempts.
270 base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
271 RELEASE_ASSERT(base);
272
273 // FIXME: If base is by accident blink page size aligned
274 // here then we can create two pages out of reserved
275 // space. Do this.
276 Address alignedBase = roundToBlinkPageBoundary(base);
277
278 return new PageMemoryRegion(alignedBase, size, numPages);
279 #endif
280 }
281
282 private:
283 PageMemoryRegion(Address base, size_t size, unsigned numPages)
284 : MemoryRegion(base, size)
285 , m_numPages(numPages)
286 {
287 }
288
289 unsigned m_numPages;
290 };
291
292 // Representation of the memory used for a Blink heap page.
293 //
294 // The representation keeps track of two memory regions:
295 //
296 // 1. The virtual memory reserved from the system in order to be able
297 // to free all the virtual memory reserved. Multiple PageMemory
298 // instances can share the same reserved memory region and
299 // therefore notify the reserved memory region on destruction so
300 // that the system memory can be given back when all PageMemory
301 // instances for that memory are gone.
302 //
303 // 2. The writable memory (a sub-region of the reserved virtual
304 // memory region) that is used for the actual heap page payload.
305 //
306 // Guard pages are created before and after the writable memory.
307 class PageMemory {
308 public:
309 ~PageMemory()
310 {
311 __lsan_unregister_root_region(m_writable.base(), m_writable.size());
312 m_reserved->pageRemoved();
313 }
314
315 bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
316 void decommit() { m_writable.decommit(); }
317
318 Address writableStart() { return m_writable.base(); }
319
320 static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize)
321 {
322 // Set up the payload one OS page into the page memory. The
323 // first OS page is the guard page.
324 Address payloadAddress = region->base() + pageOffset + osPageSize();
325 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
326 }
327
328 // Allocate a virtual address space for one blink page with the
329 // following layout:
330 //
331 // [ guard os page | ... payload ... | guard os page ]
332 // ^---{ aligned to blink page size }
333 //
334 static PageMemory* allocate(size_t payloadSize)
335 {
336 ASSERT(payloadSize > 0);
337
338 // Virtual memory allocation routines operate in OS page sizes.
339 // Round up the requested size to nearest os page size.
340 payloadSize = roundToOsPageSize(payloadSize);
341
342 // Overallocate by 2 times OS page size to have space for a
343 // guard page at the beginning and end of blink heap page.
344 size_t allocationSize = payloadSize + 2 * osPageSize();
345 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1);
346 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize);
347 RELEASE_ASSERT(storage->commit());
348 return storage;
349 }
350
351 private:
352 PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
353 : m_reserved(reserved)
354 , m_writable(writable)
355 {
356 ASSERT(reserved->contains(writable));
357
358 // Register the writable area of the memory as part of the LSan root set.
359 // Only the writable area is mapped and can contain C++ objects. Those
360 // C++ objects can contain pointers to objects outside of the heap and
361 // should therefore be part of the LSan root set.
362 __lsan_register_root_region(m_writable.base(), m_writable.size());
363 }
364
365
366 PageMemoryRegion* m_reserved;
367 MemoryRegion m_writable;
368 };
369
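// RAII scope used while performing a garbage collection: it enters a
// safepoint for the current thread, attempts to park all other attached
// threads and marks the thread state as being in GC. The destructor
// leaves GC and resumes the parked threads.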
370 class GCScope {
371 public:
372 explicit GCScope(ThreadState::StackState stackState)
373 : m_state(ThreadState::current())
374 , m_safePointScope(stackState)
375 , m_parkedAllThreads(false)
376 {
377 TRACE_EVENT0("blink_gc", "Heap::GCScope");
378 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
379 if (m_state->isMainThread())
380 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
381
382 m_state->checkThread();
383
384 // FIXME: in the unlikely event that two threads decide to
385 // collect garbage at the same time, avoid doing two GCs in
386 // a row.
387 RELEASE_ASSERT(!m_state->isInGC());
388 RELEASE_ASSERT(!m_state->isSweepInProgress());
389 if (LIKELY(ThreadState::stopThreads())) {
390 m_parkedAllThreads = true;
391 m_state->enterGC();
392 }
393 if (m_state->isMainThread())
394 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
395 }
396
397 bool allThreadsParked() { return m_parkedAllThreads; }
398
399 ~GCScope()
400 {
401 // Only cleanup if we parked all threads in which case the GC happened
402 // and we need to resume the other threads.
403 if (LIKELY(m_parkedAllThreads)) {
404 m_state->leaveGC();
405 ASSERT(!m_state->isInGC());
406 ThreadState::resumeThreads();
407 }
408 }
409
410 private:
411 ThreadState* m_state;
412 ThreadState::SafePointScope m_safePointScope;
413 bool m_parkedAllThreads; // False if we fail to park all threads
414 };
415
416 NO_SANITIZE_ADDRESS
417 bool HeapObjectHeader::isMarked() const
418 {
419 checkHeader();
420 unsigned size = asanUnsafeAcquireLoad(&m_size);
421 return size & markBitMask;
422 }
423
424 NO_SANITIZE_ADDRESS
425 void HeapObjectHeader::unmark()
426 {
427 checkHeader();
428 m_size &= ~markBitMask;
429 }
430
431 NO_SANITIZE_ADDRESS
432 bool HeapObjectHeader::hasDeadMark() const
433 {
434 checkHeader();
435 return m_size & deadBitMask;
436 }
437
438 NO_SANITIZE_ADDRESS
439 void HeapObjectHeader::clearDeadMark()
440 {
441 checkHeader();
442 m_size &= ~deadBitMask;
443 }
444
445 NO_SANITIZE_ADDRESS
446 void HeapObjectHeader::setDeadMark()
447 {
448 ASSERT(!isMarked());
449 checkHeader();
450 m_size |= deadBitMask;
451 }
452
453 #if ENABLE(ASSERT)
454 NO_SANITIZE_ADDRESS
455 void HeapObjectHeader::zapMagic()
456 {
457 m_magic = zappedMagic;
458 }
459 #endif
460
461 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
462 {
463 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
464 HeapObjectHeader* header =
465 reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
466 return header;
467 }
468
469 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
470 {
471 ASSERT(gcInfo);
472 if (gcInfo->hasFinalizer()) {
473 gcInfo->m_finalize(object);
474 }
475
476 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
477 // In Debug builds, memory is zapped when it's freed, and the zapped memory is
478 // zeroed out when the memory is reused. Memory is also zapped when using Leak
479 // Sanitizer because the heap is used as a root region for LSan and therefore
480 // pointers in unreachable memory could hide leaks.
481 for (size_t i = 0; i < objectSize; i++)
482 object[i] = finalizedZapValue;
483
484 // Zap the primary vTable entry (secondary vTable entries are not zapped).
485 if (gcInfo->hasVTable()) {
486 *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
487 }
488 #endif
489 // In Release builds, the entire object is zeroed out when it is added to the free list.
490 // This happens right after sweeping the page and before the thread commences execution.
491 }
492
493 NO_SANITIZE_ADDRESS
494 void FinalizedHeapObjectHeader::finalize()
495 {
496 HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
497 }
498
499 template<typename Header>
500 void LargeHeapObject<Header>::unmark()
501 {
502 return heapObjectHeader()->unmark();
503 }
504
505 template<typename Header>
506 bool LargeHeapObject<Header>::isMarked()
507 {
508 return heapObjectHeader()->isMarked();
509 }
510
511 template<typename Header>
512 void LargeHeapObject<Header>::setDeadMark()
513 {
514 heapObjectHeader()->setDeadMark();
515 }
516
517 template<typename Header>
518 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
519 {
520 ASSERT(contains(address));
521 if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
522 return;
523 #if ENABLE(GC_PROFILE_MARKING)
524 visitor->setHostInfo(&address, "stack");
525 #endif
526 mark(visitor);
527 }
528
529 #if ENABLE(ASSERT)
530 static bool isUninitializedMemory(void* objectPointer, size_t objectSize)
531 {
532 // Scan through the object's fields and check that they are all zero.
533 Address* objectFields = reinterpret_cast<Address*>(objectPointer);
534 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
535 if (objectFields[i] != 0)
536 return false;
537 }
538 return true;
539 }
540 #endif
541
542 template<>
543 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
544 {
545 if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) {
546 FinalizedHeapObjectHeader* header = heapObjectHeader();
547 visitor->markNoTracing(header);
548 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
549 } else {
550 visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
551 }
552 }
553
554 template<>
555 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
556 {
557 ASSERT(gcInfo());
558 if (gcInfo()->hasVTable() && !vTableInitialized(payload())) {
559 HeapObjectHeader* header = heapObjectHeader();
560 visitor->markNoTracing(header);
561 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
562 } else {
563 visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
564 }
565 }
566
567 template<>
568 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
569 {
570 heapObjectHeader()->finalize();
571 }
572
573 template<>
574 void LargeHeapObject<HeapObjectHeader>::finalize()
575 {
576 ASSERT(gcInfo());
577 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
578 }
579
580 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
581 {
582 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
583 FinalizedHeapObjectHeader* header =
584 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
585 return header;
586 }
587
588 template<typename Header>
589 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
590 : m_currentAllocationPoint(0)
591 , m_remainingAllocationSize(0)
592 , m_firstPage(0)
593 , m_firstLargeHeapObject(0)
594 , m_firstPageAllocatedDuringSweeping(0)
595 , m_lastPageAllocatedDuringSweeping(0)
596 , m_mergePoint(0)
597 , m_biggestFreeListIndex(0)
598 , m_threadState(state)
599 , m_index(index)
600 , m_numberOfNormalPages(0)
601 , m_promptlyFreedCount(0)
602 {
603 clearFreeLists();
604 }
605
606 template<typename Header>
607 ThreadHeap<Header>::~ThreadHeap()
608 {
609 ASSERT(!m_firstPage);
610 ASSERT(!m_firstLargeHeapObject);
611 }
612
613 template<typename Header>
614 void ThreadHeap<Header>::cleanupPages()
615 {
616 clearFreeLists();
617 flushHeapContainsCache();
618
619 // Add the ThreadHeap's pages to the orphanedPagePool.
620 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
621 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
622 m_firstPage = 0;
623
624 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
625 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
626 m_firstLargeHeapObject = 0;
627 }
628
629 template<typename Header>
630 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
631 {
632 size_t allocationSize = allocationSizeFromSize(size);
633 if (threadState()->shouldGC()) {
634 if (threadState()->shouldForceConservativeGC())
635 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
636 else
637 threadState()->setGCRequested();
638 }
639 ensureCurrentAllocation(allocationSize, gcInfo);
640 return allocate(size, gcInfo);
641 }
642
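// Scan the free list buckets from the largest downwards for a bucket whose
// entries are guaranteed to be at least minSize bytes, and install its first
// entry as the current allocation area. Returns false if no suitable
// non-empty bucket exists.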
643 template<typename Header>
644 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
645 {
646 size_t bucketSize = 1 << m_biggestFreeListIndex;
647 int i = m_biggestFreeListIndex;
648 for (; i > 0; i--, bucketSize >>= 1) {
649 if (bucketSize < minSize)
650 break;
651 FreeListEntry* entry = m_freeLists[i];
652 if (entry) {
653 m_biggestFreeListIndex = i;
654 entry->unlink(&m_freeLists[i]);
655 setAllocationPoint(entry->address(), entry->size());
656 ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
657 return true;
658 }
659 }
660 m_biggestFreeListIndex = i;
661 return false;
662 }
663
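// Ensure the bump-allocation area can satisfy an allocation of minSize:
// retire the remainder of the current area to the free list, then retry from
// the free lists, then after coalescing promptly freed objects, and finally
// from a freshly added page.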
664 template<typename Header>
665 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
666 {
667 ASSERT(minSize >= allocationGranularity);
668 if (remainingAllocationSize() >= minSize)
669 return;
670
671 if (remainingAllocationSize() > 0) {
672 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
673 setAllocationPoint(0, 0);
674 }
675 if (allocateFromFreeList(minSize))
676 return;
677 if (coalesce(minSize) && allocateFromFreeList(minSize))
678 return;
679 addPageToHeap(gcInfo);
680 bool success = allocateFromFreeList(minSize);
681 RELEASE_ASSERT(success);
682 }
683
684 template<typename Header>
685 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
686 {
687 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
688 if (page->contains(address))
689 return page;
690 }
691 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
692 if (page->contains(address))
693 return page;
694 }
695 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
696 // Check that large pages are blinkPageSize aligned (modulo the
697 // osPageSize for the guard page).
698 ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
699 if (current->contains(address))
700 return current;
701 }
702 return 0;
703 }
704
705 #if ENABLE(GC_PROFILE_MARKING)
706 template<typename Header>
707 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
708 {
709 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
710 if (current->contains(address))
711 return current->gcInfo();
712 }
713 return 0;
714 }
715 #endif
716
717 #if ENABLE(GC_PROFILE_HEAP)
718 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
719 template<typename Header>
720 void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
721 {
722 size_t previousPageCount = info->pageCount;
723
724 json->beginArray("pages");
725 for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
726 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
727 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
728 json->beginArray();
729 json->pushInteger(reinterpret_cast<intptr_t>(page));
730 page->snapshot(json, info);
731 json->endArray();
732 } else {
733 page->snapshot(0, info);
734 }
735 }
736 json->endArray();
737
738 json->beginArray("largeObjects");
739 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
740 json->beginDictionary();
741 current->snapshot(json, info);
742 json->endDictionary();
743 }
744 json->endArray();
745
746 json->setInteger("pageCount", info->pageCount - previousPageCount);
747 }
748 #endif
749
750 template<typename Header>
751 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
752 {
753 ASSERT(heapPageFromAddress(address));
754 ASSERT(heapPageFromAddress(address + size - 1));
755 ASSERT(size < blinkPagePayloadSize());
756 // The free list entries are only pointer aligned (but when we allocate
757 // from them we are 8 byte aligned due to the header size).
758 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
759 ASSERT(!(size & allocationMask));
760 ASAN_POISON_MEMORY_REGION(address, size);
761 FreeListEntry* entry;
762 if (size < sizeof(*entry)) {
763 // Create a dummy header with only a size and freelist bit set.
764 ASSERT(size >= sizeof(BasicObjectHeader));
765 // Free-list-encode the size to mark the lost memory as freelist memory.
766 new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
767 // This memory gets lost. Sweeping can reclaim it.
768 return;
769 }
770 entry = new (NotNull, address) FreeListEntry(size);
771 #if defined(ADDRESS_SANITIZER)
772 // For ASan we don't add the entry to the free lists until the asanDeferMemoryReuseCount
773 // reaches zero. However we always add entire pages to ensure that adding a new page will
774 // increase the allocation space.
775 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
776 return;
777 #endif
778 int index = bucketIndexForSize(size);
779 entry->link(&m_freeLists[index]);
780 if (!m_lastFreeListEntries[index])
781 m_lastFreeListEntries[index] = entry;
782 if (index > m_biggestFreeListIndex)
783 m_biggestFreeListIndex = index;
784 }
785
786 template<typename Header>
787 void ThreadHeap<Header>::promptlyFreeObject(Header* header)
788 {
789 ASSERT(!m_threadState->isSweepInProgress());
790 header->checkHeader();
791 Address address = reinterpret_cast<Address>(header);
792 Address payload = header->payload();
793 size_t size = header->size();
794 size_t payloadSize = header->payloadSize();
795 BaseHeapPage* page = pageHeaderFromObject(address);
796 ASSERT(size > 0);
797 ASSERT(page == heapPageFromAddress(address));
798
799 {
800 ThreadState::NoSweepScope scope(m_threadState);
801 HeapObjectHeader::finalize(header->gcInfo(), payload, payloadSize);
802 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
803 memset(payload, 0, payloadSize);
804 #endif
805 header->markPromptlyFreed();
806 }
807
808 page->addToPromptlyFreedSize(size);
809 m_promptlyFreedCount++;
810 }
811
812 template<typename Header>
813 bool ThreadHeap<Header>::coalesce(size_t minSize)
814 {
815 if (m_threadState->isSweepInProgress())
816 return false;
817
818 if (m_promptlyFreedCount < 256)
819 return false;
820
821 // The smallest bucket able to satisfy an allocation request for minSize is
822 // the bucket where all free-list entries are guaranteed to be larger than
823 // minSize. That bucket is one larger than the bucket minSize would go into.
824 size_t neededBucketIndex = bucketIndexForSize(minSize) + 1;
825 size_t neededFreeEntrySize = 1 << neededBucketIndex;
826 size_t neededPromptlyFreedSize = neededFreeEntrySize * 3;
827 size_t foundFreeEntrySize = 0;
828
829 // Bail out early on large requests because it is unlikely we will find a free-list entry.
830 if (neededPromptlyFreedSize >= blinkPageSize)
831 return false;
832
833 TRACE_EVENT_BEGIN2("blink_gc", "ThreadHeap::coalesce", "requestedSize", (unsigned)minSize, "neededSize", (unsigned)neededFreeEntrySize);
834
835 // Search for a coalescing candidate.
836 ASSERT(!ownsNonEmptyAllocationArea());
837 size_t pageCount = 0;
838 HeapPage<Header>* page = m_firstPage;
839 while (page) {
840 // Only consider one of the first 'n' pages. A "younger" page is more likely to have freed backings.
841 if (++pageCount > numberOfPagesToConsiderForCoalescing) {
842 page = 0;
843 break;
844 }
845 // Only coalesce pages with "sufficient" promptly freed space.
846 if (page->promptlyFreedSize() >= neededPromptlyFreedSize) {
847 break;
848 }
849 page = page->next();
850 }
851
852 // If we found a likely candidate, fully coalesce all its promptly-freed entries.
853 if (page) {
854 page->clearObjectStartBitMap();
855 page->resetPromptlyFreedSize();
856 size_t freedCount = 0;
857 Address startOfGap = page->payload();
858 for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
859 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
860 ASSERT(basicHeader->size() > 0);
861 ASSERT(basicHeader->size() < blinkPagePayloadSize());
862
863 if (basicHeader->isPromptlyFreed()) {
864 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize());
865 size_t size = basicHeader->size();
866 ASSERT(size >= sizeof(Header));
867 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
868 memset(headerAddress, 0, sizeof(Header));
869 #endif
870 ++freedCount;
871 headerAddress += size;
872 continue;
873 }
874
875 if (startOfGap != headerAddress) {
876 size_t size = headerAddress - startOfGap;
877 addToFreeList(startOfGap, size);
878 if (size > foundFreeEntrySize)
879 foundFreeEntrySize = size;
880 }
881
882 headerAddress += basicHeader->size();
883 startOfGap = headerAddress;
884 }
885
886 if (startOfGap != page->end()) {
887 size_t size = page->end() - startOfGap;
888 addToFreeList(startOfGap, size);
889 if (size > foundFreeEntrySize)
890 foundFreeEntrySize = size;
891 }
892
893 // Check before subtracting because freedCount might not be balanced with freed entries.
894 if (freedCount < m_promptlyFreedCount)
895 m_promptlyFreedCount -= freedCount;
896 else
897 m_promptlyFreedCount = 0;
898 }
899
900 TRACE_EVENT_END1("blink_gc", "ThreadHeap::coalesce", "foundFreeEntrySize", (unsigned)foundFreeEntrySize);
901
902 if (foundFreeEntrySize < neededFreeEntrySize) {
903 // If coalescing failed, reset the freed count to delay coalescing again.
904 m_promptlyFreedCount = 0;
905 return false;
906 }
907
908 return true;
909 }
910
911 template<typename Header>
912 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
913 {
914 // Caller already added space for object header and rounded up to allocation alignment
915 ASSERT(!(size & allocationMask));
916
917 size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
918
919 // Ensure that there is enough space for alignment. If the header
920 // is not a multiple of 8 bytes we will allocate an extra
921 // headerPadding<Header> bytes to ensure it is 8 byte aligned.
922 allocationSize += headerPadding<Header>();
923
924 // If ASan is supported we add allocationGranularity bytes to the allocated space and
925 // poison that to detect overflows
926 #if defined(ADDRESS_SANITIZER)
927 allocationSize += allocationGranularity;
928 #endif
929 if (threadState()->shouldGC())
930 threadState()->setGCRequested();
931 Heap::flushHeapDoesNotContainCache();
932 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
933 Address largeObjectAddress = pageMemory->writableStart();
934 Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
935 memset(headerAddress, 0, size);
936 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
937 Address result = headerAddress + sizeof(*header);
938 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
939 LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
940
941 // Poison the object header and allocationGranularity bytes after the object
942 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
943 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
944 largeObject->link(&m_firstLargeHeapObject);
945 stats().increaseAllocatedSpace(largeObject->size());
946 stats().increaseObjectSpace(largeObject->payloadSize());
947 return result;
948 }
949
950 template<typename Header>
951 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
952 {
953 flushHeapContainsCache();
954 object->unlink(previousNext);
955 object->finalize();
956
957 // Unpoison the object header and allocationGranularity bytes after the
958 // object before freeing.
959 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
960 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
961
962 if (object->terminating()) {
963 ASSERT(ThreadState::current()->isTerminating());
964 // The thread is shutting down so this object is being removed as part
965 // of a thread local GC. In that case the object could be traced in the
966 // next global GC either due to a dead object being traced via a
967 // conservative pointer or due to a programming error where an object
968 // in another thread heap keeps a dangling pointer to this object.
969 // To guard against this we put the large object memory in the
970 // orphanedPagePool to ensure it is still reachable. After the next global
971 // GC it can be released assuming no rogue/dangling pointers refer to
972 // it.
973 // NOTE: large objects are not moved to the free page pool as it is
974 // unlikely they can be reused due to their individual sizes.
975 Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
976 } else {
977 ASSERT(!ThreadState::current()->isTerminating());
978 PageMemory* memory = object->storage();
979 object->~LargeHeapObject<Header>();
980 delete memory;
981 }
982 }
983
984 template<typename DataType>
985 PagePool<DataType>::PagePool()
986 {
987 for (int i = 0; i < NumberOfHeaps; ++i) {
988 m_pool[i] = 0;
989 }
990 }
991
992 FreePagePool::~FreePagePool()
993 {
994 for (int index = 0; index < NumberOfHeaps; ++index) {
995 while (PoolEntry* entry = m_pool[index]) {
996 m_pool[index] = entry->next;
997 PageMemory* memory = entry->data;
998 ASSERT(memory);
999 delete memory;
1000 delete entry;
1001 }
1002 }
1003 }
1004
1005 void FreePagePool::addFreePage(int index, PageMemory* memory)
1006 {
1007 // When adding a page to the pool we decommit it to ensure it is unused
1008 // while in the pool. This also allows the physical memory, backing the
1009 // page, to be given back to the OS.
1010 memory->decommit();
1011 MutexLocker locker(m_mutex[index]);
1012 PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
1013 m_pool[index] = entry;
1014 }
1015
1016 PageMemory* FreePagePool::takeFreePage(int index)
1017 {
1018 MutexLocker locker(m_mutex[index]);
1019 while (PoolEntry* entry = m_pool[index]) {
1020 m_pool[index] = entry->next;
1021 PageMemory* memory = entry->data;
1022 ASSERT(memory);
1023 delete entry;
1024 if (memory->commit())
1025 return memory;
1026
1027 // We got some memory, but failed to commit it, try again.
1028 delete memory;
1029 }
1030 return 0;
1031 }
1032
1033 OrphanedPagePool::~OrphanedPagePool()
1034 {
1035 for (int index = 0; index < NumberOfHeaps; ++index) {
1036 while (PoolEntry* entry = m_pool[index]) {
1037 m_pool[index] = entry->next;
1038 BaseHeapPage* page = entry->data;
1039 delete entry;
1040 PageMemory* memory = page->storage();
1041 ASSERT(memory);
1042 page->~BaseHeapPage();
1043 delete memory;
1044 }
1045 }
1046 }
1047
1048 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
1049 {
1050 page->markOrphaned();
1051 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
1052 m_pool[index] = entry;
1053 }
1054
1055 NO_SANITIZE_ADDRESS
1056 void OrphanedPagePool::decommitOrphanedPages()
1057 {
1058 #if ENABLE(ASSERT)
1059 // No locking needed as all threads are at safepoints at this point in time.
1060 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1061 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1062 ASSERT((*it)->isAtSafePoint());
1063 #endif
1064
1065 for (int index = 0; index < NumberOfHeaps; ++index) {
1066 PoolEntry* entry = m_pool[index];
1067 PoolEntry** prevNext = &m_pool[index];
1068 while (entry) {
1069 BaseHeapPage* page = entry->data;
1070 if (page->tracedAfterOrphaned()) {
1071 // If the orphaned page was traced in the last GC it is not
1072 // decommitted. We only decommit a page, i.e. put it in the
1073 // memory pool, when the page has no objects pointing to it.
1074 // We remark the page as orphaned to clear the tracedAfterOrphaned
1075 // flag and any object trace bits that were set during tracing.
1076 page->markOrphaned();
1077 prevNext = &entry->next;
1078 entry = entry->next;
1079 continue;
1080 }
1081
1082 // Page was not traced. Check if we should reuse the memory or just
1083 // free it. Large object memory is not reused, but freed, normal
1084 // blink heap pages are reused.
1085 // NOTE: We call the destructor before freeing or adding to the
1086 // free page pool.
1087 PageMemory* memory = page->storage();
1088 if (page->isLargeObject()) {
1089 page->~BaseHeapPage();
1090 delete memory;
1091 } else {
1092 page->~BaseHeapPage();
1093 // Clear out the page's memory before adding it to the free page
1094 // pool to ensure it is zero filled when being reused.
1095 clearMemory(memory);
1096 Heap::freePagePool()->addFreePage(index, memory);
1097 }
1098
1099 PoolEntry* deadEntry = entry;
1100 entry = entry->next;
1101 *prevNext = entry;
1102 delete deadEntry;
1103 }
1104 }
1105 }
1106
1107 NO_SANITIZE_ADDRESS
1108 void OrphanedPagePool::clearMemory(PageMemory* memory)
1109 {
1110 #if defined(ADDRESS_SANITIZER)
1111 // Don't use memset when running with ASan since this needs to zap
1112 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
1113 // only works for code in this method and not for calls to memset.
1114 Address base = memory->writableStart();
1115 for (Address current = base; current < base + blinkPagePayloadSize(); ++current)
1116 *current = 0;
1117 #else
1118 memset(memory->writableStart(), 0, blinkPagePayloadSize());
1119 #endif
1120 }
1121
1122 #if ENABLE(ASSERT)
1123 bool OrphanedPagePool::contains(void* object)
1124 {
1125 for (int index = 0; index < NumberOfHeaps; ++index) {
1126 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
1127 BaseHeapPage* page = entry->data;
1128 if (page->contains(reinterpret_cast<Address>(object)))
1129 return true;
1130 }
1131 }
1132 return false;
1133 }
1134 #endif
1135
1136 template<>
1137 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1138 {
1139 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
1140 // the heap should be unused (i.e. 0).
1141 allocatePage(0);
1142 }
1143
1144 template<>
1145 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1146 {
1147 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
1148 // since it is the same for all objects
1149 ASSERT(gcInfo);
1150 allocatePage(gcInfo);
1151 }
1152
1153 template <typename Header>
1154 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1155 {
1156 MutexLocker locker(m_threadState->sweepMutex());
1157 flushHeapContainsCache();
1158 if (page->terminating()) {
1159 // The thread is shutting down so this page is being removed as part
1160 // of a thread local GC. In that case the page could be accessed in the
1161 // next global GC either due to a dead object being traced via a
1162 // conservative pointer or due to a programming error where an object
1163 // in another thread heap keeps a dangling pointer to this object.
1164 // To guard against this we put the page in the orphanedPagePool to
1165 // ensure it is still reachable. After the next global GC it can be
1166 // decommitted and moved to the page pool assuming no rogue/dangling
1167 // pointers refer to it.
1168 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
1169 } else {
1170 PageMemory* memory = page->storage();
1171 page->~HeapPage<Header>();
1172 Heap::freePagePool()->addFreePage(m_index, memory);
1173 }
1174 }
1175
1176 template<typename Header>
1177 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
1178 {
1179 Heap::flushHeapDoesNotContainCache();
1180 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1181 // We continue allocating page memory until we succeed in getting one.
1182 // Since the FreePagePool is global other threads could use all the
1183 // newly allocated page memory before this thread calls takeFreePage.
1184 while (!pageMemory) {
1185 // Allocate a memory region for blinkPagesPerRegion pages that
1186 // will each have the following layout.
1187 //
1188 // [ guard os page | ... payload ... | guard os page ]
1189 // ^---{ aligned to blink page size }
1190 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
1191 // Set up the PageMemory object for each of the pages in the
1192 // region.
1193 size_t offset = 0;
1194 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
1195 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
1196 offset += blinkPageSize;
1197 }
1198 pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1199 }
1200 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
1201 // Use a separate list for pages allocated during sweeping to make
1202 // sure that we do not accidentally sweep objects that have been
1203 // allocated during sweeping.
1204 if (m_threadState->isSweepInProgress()) {
1205 if (!m_lastPageAllocatedDuringSweeping)
1206 m_lastPageAllocatedDuringSweeping = page;
1207 page->link(&m_firstPageAllocatedDuringSweeping);
1208 } else {
1209 page->link(&m_firstPage);
1210 }
1211 ++m_numberOfNormalPages;
1212 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
1213 }
1214
1215 #if ENABLE(ASSERT)
1216 template<typename Header>
1217 bool ThreadHeap<Header>::pagesToBeSweptContains(Address address)
1218 {
1219 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
1220 if (page->contains(address))
1221 return true;
1222 }
1223 return false;
1224 }
1225
1226 template<typename Header>
1227 bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address)
1228 {
1229 for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
1230 if (page->contains(address))
1231 return true;
1232 }
1233 return false;
1234 }
1235
1236 template<typename Header>
1237 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
1238 {
1239 ASSERT(!m_firstPageAllocatedDuringSweeping);
1240 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1241 page->getStats(scannedStats);
1242 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1243 current->getStats(scannedStats);
1244 }
1245 #endif
1246
1247 template<typename Header>
1248 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
1249 {
1250 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
1251 HeapPage<Header>* page = m_firstPage;
1252 HeapPage<Header>** previousNext = &m_firstPage;
1253 HeapPage<Header>* previous = 0;
1254 while (page) {
1255 page->resetPromptlyFreedSize();
1256 if (page->isEmpty()) {
1257 HeapPage<Header>* unused = page;
1258 if (unused == m_mergePoint)
1259 m_mergePoint = previous;
1260 page = page->next();
1261 HeapPage<Header>::unlink(this, unused, previousNext);
1262 --m_numberOfNormalPages;
1263 } else {
1264 page->sweep(stats, this);
1265 previousNext = &page->m_next;
1266 previous = page;
1267 page = page->next();
1268 }
1269 }
1270 }
1271
1272 template<typename Header>
1273 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
1274 {
1275 TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1276 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1277 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1278 if (current->isMarked()) {
1279 stats->increaseAllocatedSpace(current->size());
1280 stats->increaseObjectSpace(current->payloadSize());
1281 current->unmark();
1282 previousNext = &current->m_next;
1283 current = current->next();
1284 } else {
1285 LargeHeapObject<Header>* next = current->next();
1286 freeLargeObject(current, previousNext);
1287 current = next;
1288 }
1289 }
1290 }
1291
1292
1293 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
1294 // sweeping to catch cases where dead objects touch each other. This is not
1295 // turned on by default because it also triggers for cases that are safe.
1296 // Examples of such safe cases are context life cycle observers and timers
1297 // embedded in garbage collected objects.
1298 #define STRICT_ASAN_FINALIZATION_CHECKING 0
1299
1300 template<typename Header>
1301 void ThreadHeap<Header>::sweep(HeapStats* stats)
1302 {
1303 ASSERT(isConsistentForSweeping());
1304 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
1305 // When using ASan do a pre-sweep where all unmarked objects are
1306 // poisoned before calling their finalizer methods. This can catch
1307 // the case where the finalizer of an object tries to modify
1308 // another object as part of finalization.
1309 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1310 page->poisonUnmarkedObjects();
1311 #endif
1312 sweepNormalPages(stats);
1313 sweepLargePages(stats);
1314 }
1315
1316 template<typename Header>
1317 void ThreadHeap<Header>::postSweepProcessing()
1318 {
1319 // If pages have been allocated during sweeping, link them into
1320 // the list of pages.
1321 if (m_firstPageAllocatedDuringSweeping) {
1322 m_lastPageAllocatedDuringSweeping->m_next = m_firstPage;
1323 m_firstPage = m_firstPageAllocatedDuringSweeping;
1324 m_lastPageAllocatedDuringSweeping = 0;
1325 m_firstPageAllocatedDuringSweeping = 0;
1326 }
1327 }
1328
1329 #if ENABLE(ASSERT)
1330 template<typename Header>
1331 bool ThreadHeap<Header>::isConsistentForSweeping()
1332 {
1333 // A thread heap is consistent for sweeping if none of the pages to
1334 // be swept contain a freelist block or the current allocation
1335 // point.
1336 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1337 for (FreeListEntry* freeListEntry = m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
1338 if (pagesToBeSweptContains(freeListEntry->address())) {
1339 return false;
1340 }
1341 ASSERT(pagesAllocatedDuringSweepingContains(freeListEntry->address()));
1342 }
1343 }
1344 if (ownsNonEmptyAllocationArea()) {
1345 ASSERT(pagesToBeSweptContains(currentAllocationPoint())
1346 || pagesAllocatedDuringSweepingContains(currentAllocationPoint()));
1347 return !pagesToBeSweptContains(currentAllocationPoint());
1348 }
1349 return true;
1350 }
1351 #endif
1352
1353 template<typename Header>
1354 void ThreadHeap<Header>::makeConsistentForSweeping()
1355 {
1356 if (ownsNonEmptyAllocationArea())
1357 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
1358 setAllocationPoint(0, 0);
1359 clearFreeLists();
1360 }
1361
1362 template<typename Header>
1363 void ThreadHeap<Header>::clearLiveAndMarkDead()
1364 {
1365 ASSERT(isConsistentForSweeping());
1366 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1367 page->clearLiveAndMarkDead();
1368 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
1369 if (current->isMarked())
1370 current->unmark();
1371 else
1372 current->setDeadMark();
1373 }
1374 }
1375
1376 template<typename Header>
1377 void ThreadHeap<Header>::clearFreeLists()
1378 {
1379 m_promptlyFreedCount = 0;
1380 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1381 m_freeLists[i] = 0;
1382 m_lastFreeListEntries[i] = 0;
1383 }
1384 }
1385
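// Return the free list bucket index for the given size: the index of the
// highest set bit, i.e. floor(log2(size)). Bucket i therefore holds entries
// with sizes in [2^i, 2^(i+1)).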
1386 int BaseHeap::bucketIndexForSize(size_t size)
1387 {
1388 ASSERT(size > 0);
1389 int index = -1;
1390 while (size) {
1391 size >>= 1;
1392 index++;
1393 }
1394 return index;
1395 }
1396
1397 template<typename Header>
1398 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
1399 : BaseHeapPage(storage, gcInfo, heap->threadState())
1400 , m_next(0)
1401 {
1402 COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
1403 m_objectStartBitMapComputed = false;
1404 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1405 heap->stats().increaseAllocatedSpace(blinkPageSize);
1406 }
1407
1408 template<typename Header>
1409 void HeapPage<Header>::link(HeapPage** prevNext)
1410 {
1411 m_next = *prevNext;
1412 *prevNext = this;
1413 }
1414
1415 template<typename Header>
1416 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext)
1417 {
1418 *prevNext = unused->m_next;
1419 heap->removePageFromHeap(unused);
1420 }
1421
1422 template<typename Header>
1423 void HeapPage<Header>::getStats(HeapStats& stats)
1424 {
1425 stats.increaseAllocatedSpace(blinkPageSize);
1426 Address headerAddress = payload();
1427 ASSERT(headerAddress != end());
1428 do {
1429 Header* header = reinterpret_cast<Header*>(headerAddress);
1430 if (!header->isFree())
1431 stats.increaseObjectSpace(header->payloadSize());
1432 ASSERT(header->size() < blinkPagePayloadSize());
1433 headerAddress += header->size();
1434 ASSERT(headerAddress <= end());
1435 } while (headerAddress < end());
1436 }
1437
1438 template<typename Header>
1439 bool HeapPage<Header>::isEmpty()
1440 {
1441 BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
1442 return header->isFree() && (header->size() == payloadSize());
1443 }
1444
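// Sweep a single heap page: finalize and clear unmarked objects, unmark
// surviving objects, and turn the gaps between surviving objects into
// free list entries.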
1445 template<typename Header>
1446 void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
1447 {
1448 clearObjectStartBitMap();
1449 stats->increaseAllocatedSpace(blinkPageSize);
1450 Address startOfGap = payload();
1451 for (Address headerAddress = startOfGap; headerAddress < end(); ) {
1452 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
1453 ASSERT(basicHeader->size() > 0);
1454 ASSERT(basicHeader->size() < blinkPagePayloadSize());
1455
1456 if (basicHeader->isFree()) {
1457 size_t size = basicHeader->size();
1458 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1459 // Zero the memory in the free list header to maintain the
1460 // invariant that memory on the free list is zero filled.
1461 // The rest of the memory is already on the free list and is
1462 // therefore already zero filled.
1463 if (size < sizeof(FreeListEntry))
1464 memset(headerAddress, 0, size);
1465 else
1466 memset(headerAddress, 0, sizeof(FreeListEntry));
1467 #endif
1468 headerAddress += size;
1469 continue;
1470 }
1471 // At this point we know this is a valid object of type Header
1472 Header* header = static_cast<Header*>(basicHeader);
1473
1474 if (!header->isMarked()) {
1475 // For ASan we unpoison the specific object when calling the finalizer and
1476 // poison it again when done to allow the object's own finalizer to operate
1477 // on the object, but not have other finalizers be allowed to access it.
1478 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1479 finalize(header);
1480 size_t size = header->size();
1481 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1482 // This memory will be added to the freelist. Maintain the invariant
1483 // that memory on the freelist is zero filled.
1484 memset(headerAddress, 0, size);
1485 #endif
1486 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1487 headerAddress += size;
1488 continue;
1489 }
1490
1491 if (startOfGap != headerAddress)
1492 heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1493 header->unmark();
1494 headerAddress += header->size();
1495 stats->increaseObjectSpace(header->payloadSize());
1496 startOfGap = headerAddress;
1497 }
1498 if (startOfGap != end())
1499 heap->addToFreeList(startOfGap, end() - startOfGap);
1500 }
1501
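// Walk all objects on the page, clearing the mark bit of marked (live)
// objects and setting the dead bit on unmarked ones; free list entries are
// skipped.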
1502 template<typename Header>
1503 void HeapPage<Header>::clearLiveAndMarkDead()
1504 {
1505 for (Address headerAddress = payload(); headerAddress < end();) {
1506 Header* header = reinterpret_cast<Header*>(headerAddress);
1507 ASSERT(header->size() < blinkPagePayloadSize());
1508 // Check for a free list entry first since we cannot call
1509 // isMarked on a free list entry.
1510 if (header->isFree()) {
1511 headerAddress += header->size();
1512 continue;
1513 }
1514 if (header->isMarked())
1515 header->unmark();
1516 else
1517 header->setDeadMark();
1518 headerAddress += header->size();
1519 }
1520 }
1521
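// Recompute the object start bitmap by walking the page's headers and
// setting one bit per allocation-granularity slot that starts an object
// (or free list entry).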
1522 template<typename Header>
1523 void HeapPage<Header>::populateObjectStartBitMap()
1524 {
1525 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1526 Address start = payload();
1527 for (Address headerAddress = start; headerAddress < end();) {
1528 Header* header = reinterpret_cast<Header*>(headerAddress);
1529 size_t objectOffset = headerAddress - start;
1530 ASSERT(!(objectOffset & allocationMask));
1531 size_t objectStartNumber = objectOffset / allocationGranularity;
1532 size_t mapIndex = objectStartNumber / 8;
1533 ASSERT(mapIndex < objectStartBitMapSize);
1534 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1535 headerAddress += header->size();
1536 ASSERT(headerAddress <= end());
1537 }
1538 m_objectStartBitMapComputed = true;
1539 }
1540
1541 template<typename Header>
1542 void HeapPage<Header>::clearObjectStartBitMap()
1543 {
1544 m_objectStartBitMapComputed = false;
1545 }
1546
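// Count the number of leading zero bits in a byte (returns 8 for zero)
// using a small binary search.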
1547 static int numberOfLeadingZeroes(uint8_t byte)
1548 {
1549 if (!byte)
1550 return 8;
1551 int result = 0;
1552 if (byte <= 0x0F) {
1553 result += 4;
1554 byte = byte << 4;
1555 }
1556 if (byte <= 0x3F) {
1557 result += 2;
1558 byte = byte << 2;
1559 }
1560 if (byte <= 0x7F)
1561 result++;
1562 return result;
1563 }
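#if 0
// Illustrative sanity check (a sketch, not compiled into the build): the
// expected results of numberOfLeadingZeroes() for a few byte values.
static void checkNumberOfLeadingZeroes()
{
    ASSERT(numberOfLeadingZeroes(0x80) == 0);
    ASSERT(numberOfLeadingZeroes(0x40) == 1);
    ASSERT(numberOfLeadingZeroes(0x10) == 3);
    ASSERT(numberOfLeadingZeroes(0x01) == 7);
    ASSERT(numberOfLeadingZeroes(0x00) == 8);
}
#endif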
1564
1565 template<typename Header>
1566 Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1567 {
1568 if (address < payload())
1569 return 0;
1570 if (!isObjectStartBitMapComputed())
1571 populateObjectStartBitMap();
1572 size_t objectOffset = address - payload();
1573 size_t objectStartNumber = objectOffset / allocationGranularity;
1574 size_t mapIndex = objectStartNumber / 8;
1575 ASSERT(mapIndex < objectStartBitMapSize);
1576 size_t bit = objectStartNumber & 7;
1577 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1578 while (!byte) {
1579 ASSERT(mapIndex > 0);
1580 byte = m_objectStartBitMap[--mapIndex];
1581 }
1582 int leadingZeroes = numberOfLeadingZeroes(byte);
1583 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1584 objectOffset = objectStartNumber * allocationGranularity;
1585 Address objectAddress = objectOffset + payload();
1586 Header* header = reinterpret_cast<Header*>(objectAddress);
1587 if (header->isFree())
1588 return 0;
1589 return header;
1590 }
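// Worked example of the lookup above (values are illustrative): for an
// interior address whose objectStartNumber is 13 (mapIndex 1, bit 5), the
// mask keeps bits 0-5 of m_objectStartBitMap[1]. If the containing object
// starts at objectStartNumber 12, the masked byte is 0x10, which has 3
// leading zeroes, so objectStartNumber = 1 * 8 + 7 - 3 = 12 and the header
// is found at payload() + 12 * allocationGranularity.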
1591
1592 template<typename Header>
1593 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1594 {
1595 ASSERT(contains(address));
1596 Header* header = findHeaderFromAddress(address);
1597 if (!header || header->hasDeadMark())
1598 return;
1599
1600 #if ENABLE(GC_PROFILE_MARKING)
1601 visitor->setHostInfo(&address, "stack");
1602 #endif
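    // If the object's vtable pointer is still null the object is under
    // construction; mark it without tracing, since tracing would dispatch
    // through the uninitialized vtable.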
1603 if (hasVTable(header) && !vTableInitialized(header->payload())) {
1604 visitor->markNoTracing(header);
1605 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
1606 } else {
1607 visitor->mark(header, traceCallback(header));
1608 }
1609 }
1610
1611 #if ENABLE(GC_PROFILE_MARKING)
1612 template<typename Header>
1613 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1614 {
1615 if (address < payload())
1616 return 0;
1617
1618 if (gcInfo()) // for non-FinalizedHeapObjectHeader pages
1619 return gcInfo();
1620
1621 Header* header = findHeaderFromAddress(address);
1622 if (!header)
1623 return 0;
1624
1625 return header->gcInfo();
1626 }
1627 #endif
1628
1629 #if ENABLE(GC_PROFILE_HEAP)
1630 template<typename Header>
1631 void HeapPage<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1632 {
1633 Header* header = 0;
1634 for (Address addr = payload(); addr < end(); addr += header->size()) {
1635 header = reinterpret_cast<Header*>(addr);
1636 if (json)
1637 json->pushInteger(header->encodedSize());
1638 if (header->isFree()) {
1639 info->freeSize += header->size();
1640 continue;
1641 }
1642
1643 const GCInfo* gcinfo = header->gcInfo() ? header->gcInfo() : gcInfo();
1644 size_t tag = info->getClassTag(gcinfo);
1645 size_t age = header->age();
1646 if (json)
1647 json->pushInteger(tag);
1648 if (header->isMarked()) {
1649 info->liveCount[tag] += 1;
1650 info->liveSize[tag] += header->size();
1651 // Count objects that are live when promoted to the final generation.
1652 if (age == maxHeapObjectAge - 1)
1653 info->generations[tag][maxHeapObjectAge] += 1;
1654 header->incAge();
1655 } else {
1656 info->deadCount[tag] += 1;
1657 info->deadSize[tag] += header->size();
1658 // Count objects that are dead before the final generation.
1659 if (age < maxHeapObjectAge)
1660 info->generations[tag][age] += 1;
1661 }
1662 }
1663 }
1664 #endif
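// A worked example of the age accounting above (assuming maxHeapObjectAge is
// 7): an object marked live at age 6 is counted in generations[tag][7] as
// promoted to the final generation and then has its age incremented; an
// object found dead at age 3 is counted in generations[tag][3]. Objects
// already at the final age are not counted again.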
1665
1666 #if defined(ADDRESS_SANITIZER)
1667 template<typename Header>
1668 void HeapPage<Header>::poisonUnmarkedObjects()
1669 {
1670 for (Address headerAddress = payload(); headerAddress < end(); ) {
1671 Header* header = reinterpret_cast<Header*>(headerAddress);
1672 ASSERT(header->size() < blinkPagePayloadSize());
1673
1674 if (!header->isFree() && !header->isMarked())
1675 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1676 headerAddress += header->size();
1677 }
1678 }
1679 #endif
1680
1681 template<>
1682 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1683 {
1684 header->finalize();
1685 }
1686
1687 template<>
1688 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1689 {
1690 ASSERT(gcInfo());
1691 HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1692 }
1693
1694 template<>
1695 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1696 {
1697 ASSERT(gcInfo());
1698 return gcInfo()->m_trace;
1699 }
1700
1701 template<>
1702 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1703 {
1704 return header->traceCallback();
1705 }
1706
1707 template<>
1708 inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1709 {
1710 ASSERT(gcInfo());
1711 return gcInfo()->hasVTable();
1712 }
1713
1714 template<>
1715 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1716 {
1717 return header->hasVTable();
1718 }
1719
1720 template<typename Header>
1721 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1722 {
1723 stats.increaseAllocatedSpace(size());
1724 stats.increaseObjectSpace(payloadSize());
1725 }
1726
1727 #if ENABLE(GC_PROFILE_HEAP)
1728 template<typename Header>
1729 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1730 {
1731 Header* header = heapObjectHeader();
1732 size_t tag = info->getClassTag(header->gcInfo());
1733 size_t age = header->age();
1734 if (isMarked()) {
1735 info->liveCount[tag] += 1;
1736 info->liveSize[tag] += header->size();
1737 // Count objects that are live when promoted to the final generation.
1738 if (age == maxHeapObjectAge - 1)
1739 info->generations[tag][maxHeapObjectAge] += 1;
1740 header->incAge();
1741 } else {
1742 info->deadCount[tag] += 1;
1743 info->deadSize[tag] += header->size();
1744 // Count objects that are dead before the final generation.
1745 if (age < maxHeapObjectAge)
1746 info->generations[tag][age] += 1;
1747 }
1748
1749 if (json) {
1750 json->setInteger("class", tag);
1751 json->setInteger("size", header->size());
1752 json->setInteger("isMarked", isMarked());
1753 }
1754 }
1755 #endif
1756
1757 template<typename Entry>
1758 void HeapExtentCache<Entry>::flush()
1759 {
1760 if (m_hasEntries) {
1761 for (int i = 0; i < numberOfEntries; i++)
1762 m_entries[i] = Entry();
1763 m_hasEntries = false;
1764 }
1765 }
1766
1767 template<typename Entry>
1768 size_t HeapExtentCache<Entry>::hash(Address address)
1769 {
1770 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1771 value ^= value >> numberOfEntriesLog2;
1772 value ^= value >> (numberOfEntriesLog2 * 2);
1773 value &= numberOfEntries - 1;
1774 return value & ~1; // Returns only even number.
1775 }
1776
1777 template<typename Entry>
1778 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1779 {
1780 size_t index = hash(address);
1781 ASSERT(!(index & 1));
1782 Address cachePage = roundToBlinkPageStart(address);
1783 if (m_entries[index].address() == cachePage)
1784 return m_entries[index].result();
1785 if (m_entries[index + 1].address() == cachePage)
1786 return m_entries[index + 1].result();
1787 return 0;
1788 }
1789
1790 template<typename Entry>
1791 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry)
1792 {
1793 m_hasEntries = true;
1794 size_t index = hash(address);
1795 ASSERT(!(index & 1));
1796 Address cachePage = roundToBlinkPageStart(address);
1797 m_entries[index + 1] = m_entries[index];
1798 m_entries[index] = Entry(cachePage, entry);
1799 }
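#if 0
// Illustrative sketch (not compiled): the cache is two-way set associative.
// hash() always returns an even slot, lookup() probes the pair
// (index, index + 1), and addEntry() moves the previous occupant of the even
// slot to the odd one. The addresses and pages below are hypothetical and
// assumed to hash to the same bucket.
HeapContainsCache cache;
cache.addEntry(addressOnPageA, pageA); // even slot -> pageA
cache.addEntry(addressOnPageB, pageB); // even slot -> pageB, odd slot -> pageA
ASSERT(cache.lookup(addressOnPageA) == pageA);
ASSERT(cache.lookup(addressOnPageB) == pageB);
#endif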
1800
1801 // These should not be needed, but it seems impossible to persuade clang to
1802 // instantiate the template functions and export them from a shared library, so
1803 // we add these in the non-templated subclass, which does not have that issue.
1804 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1805 {
1806 HeapExtentCache<PositiveEntry>::addEntry(address, page);
1807 }
1808
1809 BaseHeapPage* HeapContainsCache::lookup(Address address)
1810 {
1811 return HeapExtentCache<PositiveEntry>::lookup(address);
1812 }
1813
1814 void Heap::flushHeapDoesNotContainCache()
1815 {
1816 s_heapDoesNotContainCache->flush();
1817 }
1818
1819 // The marking mutex is used to ensure sequential access to data
1820 // structures during marking. The marking mutex needs to be acquired
1821 // during marking when elements are taken from the global marking
1822 // stack or when elements are added to the global ephemeron,
1823 // post-marking, and weak processing stacks. In debug mode the mutex
1824 // also needs to be acquired when asserts use the heap contains
1825 // caches.
1826 static Mutex& markingMutex()
1827 {
1828 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
1829 return mutex;
1830 }
1831
1832 static ThreadCondition& markingCondition()
1833 {
1834 AtomicallyInitializedStatic(ThreadCondition&, condition = *new ThreadCondition);
1835 return condition;
1836 }
1837
1838 static void markNoTracingCallback(Visitor* visitor, void* object)
1839 {
1840 visitor->markNoTracing(object);
1841 }
1842
1843 class MarkingVisitor : public Visitor {
1844 public:
1845 #if ENABLE(GC_PROFILE_MARKING)
1846 typedef HashSet<uintptr_t> LiveObjectSet;
1847 typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1848 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1849 #endif
1850
1851 MarkingVisitor(CallbackStack* markingStack) : m_markingStack(markingStack)
1852 {
1853 }
1854
1855 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1856 {
1857 ASSERT(header);
1858 #if ENABLE(ASSERT)
1859 {
1860 // Check that we are not marking objects that are outside
1861 // the heap by calling Heap::contains. However, we cannot
1862 // call Heap::contains when outside a GC, and mark is also
1863 // called when doing weakness processing for ephemerons.
1864 // Hence we only check when called within a GC.
1865 MutexLocker locker(markingMutex());
1866 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header));
1867 }
1868 #endif
1869 ASSERT(objectPointer);
1870 if (header->isMarked())
1871 return;
1872 header->mark();
1873 #if ENABLE(GC_PROFILE_MARKING)
1874 MutexLocker locker(objectGraphMutex());
1875 String className(classOf(objectPointer));
1876 {
1877 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1878 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1879 }
1880 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1881 ASSERT(result.isNewEntry);
1882 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1883 #endif
1884 if (callback)
1885 Heap::pushTraceCallback(m_markingStack, const_cast<void*>(objectPointer), callback);
1886 }
1887
1888 virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
1889 {
1890 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1891 // version to correctly find the payload.
1892 visitHeader(header, header->payload(), callback);
1893 }
1894
1895 virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
1896 {
1897 // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1898 // version to correctly find the payload.
1899 visitHeader(header, header->payload(), callback);
1900 }
1901
1902 virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
1903 {
1904 if (!objectPointer)
1905 return;
1906 FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1907 visitHeader(header, header->payload(), callback);
1908 }
1909
1910 virtual void registerDelayedMarkNoTracing(const void* object) OVERRIDE
1911 {
1912 Heap::pushPostMarkingCallback(const_cast<void*>(object), markNoTracingCallback);
1913 }
1914
1915 virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
1916 {
1917 Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
1918 }
1919
1920 virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
1921 {
1922 Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback);
1923 }
1924
1925 #if ENABLE(ASSERT)
1926 virtual bool weakTableRegistered(const void* closure)
1927 {
1928 return Heap::weakTableRegistered(closure);
1929 }
1930 #endif
1931
1932 virtual bool isMarked(const void* objectPointer) OVERRIDE
1933 {
1934 return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
1935 }
1936
1937 // This macro defines the necessary visitor methods for typed heaps
1938 #define DEFINE_VISITOR_METHODS(Type) \
1939 virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
1940 { \
1941 if (!objectPointer) \
1942 return; \
1943 HeapObjectHeader* header = \
1944 HeapObjectHeader::fromPayload(objectPointer); \
1945 visitHeader(header, header->payload(), callback); \
1946 } \
1947 virtual bool isMarked(const Type* objectPointer) OVERRIDE \
1948 { \
1949 return HeapObjectHeader::fromPayload(objectPointer)->isMarked(); \
1950 }
1951
1952 FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1953 #undef DEFINE_VISITOR_METHODS
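// For each type in FOR_EACH_TYPED_HEAP the macro above generates a
// mark(const Type*, TraceCallback) overload and an isMarked(const Type*)
// overload that resolve the header with HeapObjectHeader::fromPayload, since
// objects on typed heaps use the plain HeapObjectHeader.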
1954
1955 #if ENABLE(GC_PROFILE_MARKING)
1956 void reportStats()
1957 {
1958 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n");
1959 for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
1960 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size());
1961
1962 if (it->key == "blink::Document")
1963 reportStillAlive(it->value, previouslyLive().get(it->key));
1964
1965 fprintf(stderr, "\n");
1966 }
1967
1968 previouslyLive().swap(currentlyLive());
1969 currentlyLive().clear();
1970
1971 for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
1972 dumpPathToObjectFromObjectGraph(objectGraph(), *it);
1973 }
1974 }
1975
1976 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
1977 {
1978 int count = 0;
1979
1980 fprintf(stderr, " [previously %u]", previous.size());
1981 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1982 if (previous.find(*it) == previous.end())
1983 continue;
1984 count++;
1985 }
1986
1987 if (!count)
1988 return;
1989
1990 fprintf(stderr, " {survived 2GCs %d: ", count);
1991 for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1992 if (previous.find(*it) == previous.end())
1993 continue;
1994 fprintf(stderr, "%ld", *it);
1995 if (--count)
1996 fprintf(stderr, ", ");
1997 }
1998 ASSERT(!count);
1999 fprintf(stderr, "}");
2000 }
2001
2002 static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
2003 {
2004 ObjectGraph::const_iterator it = graph.find(target);
2005 if (it == graph.end())
2006 return;
2007 fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
2008 while (it != graph.end()) {
2009 fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data());
2010 it = graph.find(it->value.first);
2011 }
2012 fprintf(stderr, "\n");
2013 }
2014
2015 static void dumpPathToObjectOnNextGC(void* p)
2016 {
2017 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
2018 }
2019
2020 static Mutex& objectGraphMutex()
2021 {
2022 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2023 return mutex;
2024 }
2025
2026 static LiveObjectMap& previouslyLive()
2027 {
2028 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2029 return map;
2030 }
2031
2032 static LiveObjectMap& currentlyLive()
2033 {
2034 DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2035 return map;
2036 }
2037
2038 static ObjectGraph& objectGraph()
2039 {
2040 DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
2041 return graph;
2042 }
2043
2044 static HashSet<uintptr_t>& objectsToFindPath()
2045 {
2046 DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
2047 return set;
2048 }
2049 #endif
2050
2051 protected:
2052 virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
2053 {
2054 Heap::pushWeakCellPointerCallback(cell, callback);
2055 }
2056
2057 private:
2058 CallbackStack* m_markingStack;
2059 };
2060
2061 void Heap::init()
2062 {
2063 ThreadState::init();
2064 s_markingStack = new CallbackStack();
2065 s_postMarkingCallbackStack = new CallbackStack();
2066 s_weakCallbackStack = new CallbackStack();
2067 s_ephemeronStack = new CallbackStack();
2068 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
2069 s_markingVisitor = new MarkingVisitor(s_markingStack);
2070 s_freePagePool = new FreePagePool();
2071 s_orphanedPagePool = new OrphanedPagePool();
2072 s_markingThreads = new Vector<OwnPtr<blink::WebThread> >();
2073 if (blink::Platform::current()) {
2074 // FIXME: We should let the number of threads scale with the
2075 // number of processors in the system instead of hardcoding
2076 // it.
2077 for (int i = 0; i < numberOfMarkingThreads; i++)
2078 s_markingThreads->append(adoptPtr(blink::Platform::current()->createThread("Blink Heap Marker Thread")));
2079 }
2080 }
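#if 0
// Illustrative sketch (not compiled) of the FIXME above: scale the number of
// marking threads with the number of cores instead of using the hardcoded
// numberOfMarkingThreads constant. Assumes WTF::numberOfProcessorCores()
// from "wtf/NumberOfCores.h" is available in this configuration.
int markerThreadCount = WTF::numberOfProcessorCores() - 1;
if (markerThreadCount < 1)
    markerThreadCount = 1;
for (int i = 0; i < markerThreadCount; i++)
    s_markingThreads->append(adoptPtr(blink::Platform::current()->createThread("Blink Heap Marker Thread")));
#endif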
2081
2082 void Heap::shutdown()
2083 {
2084 s_shutdownCalled = true;
2085 ThreadState::shutdownHeapIfNecessary();
2086 }
2087
2088 void Heap::doShutdown()
2089 {
2090 // We don't want to call doShutdown() twice.
2091 if (!s_markingVisitor)
2092 return;
2093
2094 ASSERT(!ThreadState::isAnyThreadInGC());
2095 ASSERT(!ThreadState::attachedThreads().size());
2096 delete s_markingThreads;
2097 s_markingThreads = 0;
2098 delete s_markingVisitor;
2099 s_markingVisitor = 0;
2100 delete s_heapDoesNotContainCache;
2101 s_heapDoesNotContainCache = 0;
2102 delete s_freePagePool;
2103 s_freePagePool = 0;
2104 delete s_orphanedPagePool;
2105 s_orphanedPagePool = 0;
2106 delete s_weakCallbackStack;
2107 s_weakCallbackStack = 0;
2108 delete s_postMarkingCallbackStack;
2109 s_postMarkingCallbackStack = 0;
2110 delete s_markingStack;
2111 s_markingStack = 0;
2112 delete s_ephemeronStack;
2113 s_ephemeronStack = 0;
2114 ThreadState::shutdown();
2115 }
2116
2117 BaseHeapPage* Heap::contains(Address address)
2118 {
2119 ASSERT(ThreadState::isAnyThreadInGC());
2120 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2121 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2122 BaseHeapPage* page = (*it)->contains(address);
2123 if (page)
2124 return page;
2125 }
2126 return 0;
2127 }
2128
2129 #if ENABLE(ASSERT)
2130 bool Heap::containedInHeapOrOrphanedPage(void* object)
2131 {
2132 return contains(object) || orphanedPagePool()->contains(object);
2133 }
2134 #endif
2135
2136 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
2137 {
2138 ASSERT(ThreadState::isAnyThreadInGC());
2139
2140 #if !ENABLE(ASSERT)
2141 if (s_heapDoesNotContainCache->lookup(address))
2142 return 0;
2143 #endif
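    // In release builds a hit in the negative cache above means the address is
    // known not to point into the heap, so the per-thread page lookup can be
    // skipped. In assert-enabled builds the early return is omitted so that the
    // ASSERT below can verify the cache never claims an address that a thread
    // actually owns.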
2144
2145 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2146 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2147 if ((*it)->checkAndMarkPointer(visitor, address)) {
2148 // Pointer was in a page of that thread. If it actually pointed
2149 // into an object then that object was found and marked.
2150 ASSERT(!s_heapDoesNotContainCache->lookup(address));
2151 s_lastGCWasConservative = true;
2152 return address;
2153 }
2154 }
2155
2156 #if !ENABLE(ASSERT)
2157 s_heapDoesNotContainCache->addEntry(address, true);
2158 #else
2159 if (!s_heapDoesNotContainCache->lookup(address))
2160 s_heapDoesNotContainCache->addEntry(address, true);
2161 #endif
2162 return 0;
2163 }
2164
2165 #if ENABLE(GC_PROFILE_MARKING)
2166 const GCInfo* Heap::findGCInfo(Address address)
2167 {
2168 return ThreadState::findGCInfoFromAllThreads(address);
2169 }
2170 #endif
2171
2172 #if ENABLE(GC_PROFILE_MARKING)
2173 void Heap::dumpPathToObjectOnNextGC(void* p)
2174 {
2175 static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
2176 }
2177
2178 String Heap::createBacktraceString()
2179 {
2180 int framesToShow = 3;
2181 int stackFrameSize = 16;
2182 ASSERT(stackFrameSize >= framesToShow);
2183 typedef void* FramePointer;
2184 FramePointer* stackFrame = static_cast<FramePointer*>(alloca(sizeof(FramePointer) * stackFrameSize));
2185 WTFGetBacktrace(stackFrame, &stackFrameSize);
2186
2187 StringBuilder builder;
2188 builder.append("Persistent");
2189 bool didAppendFirstName = false;
2190 // Skip frames before/including "blink::Persistent".
2191 bool didSeePersistent = false;
2192 for (int i = 0; i < stackFrameSize && framesToShow > 0; ++i) {
2193 FrameToNameScope frameToName(stackFrame[i]);
2194 if (!frameToName.nullableName())
2195 continue;
2196 if (strstr(frameToName.nullableName(), "blink::Persistent")) {
2197 didSeePersistent = true;
2198 continue;
2199 }
2200 if (!didSeePersistent)
2201 continue;
2202 if (!didAppendFirstName) {
2203 didAppendFirstName = true;
2204 builder.append(" ... Backtrace:");
2205 }
2206 builder.append("\n\t");
2207 builder.append(frameToName.nullableName());
2208 --framesToShow;
2209 }
2210 return builder.toString().replace("blink::", "");
2211 }
2212 #endif
2213
2214 void Heap::pushTraceCallback(CallbackStack* stack, void* object, TraceCallback callback)
2215 {
2216 #if ENABLE(ASSERT)
2217 {
2218 MutexLocker locker(markingMutex());
2219 ASSERT(Heap::containedInHeapOrOrphanedPage(object));
2220 }
2221 #endif
2222 CallbackStack::Item* slot = stack->allocateEntry();
2223 *slot = CallbackStack::Item(object, callback);
2224 }
2225
2226 template<CallbackInvocationMode Mode>
2227 bool Heap::popAndInvokeTraceCallback(CallbackStack* stack, Visitor* visitor)
2228 {
2229 CallbackStack::Item* item = stack->pop();
2230 if (!item)
2231 return false;
2232 // If the object being traced is located on a page which is dead, don't
2233 // trace it. This can happen when a conservative GC kept a dead object
2234 // alive which pointed to a (now gone) object on the cleaned up page.
2235 // Also, if doing a thread-local GC, don't trace objects that are located
2236 // on other threads' heaps, i.e., pages where the terminating flag is not set.
2237 BaseHeapPage* heapPage = pageHeaderFromObject(item->object());
2238 if (Mode == GlobalMarking && heapPage->orphaned()) {
2239 // When doing a global GC we should only get a trace callback to an orphaned
2240 // page if the GC is conservative. If it is not conservative there is
2241 // a bug in the code where we have a dangling pointer to a page
2242 // on the dead thread.
2243 RELEASE_ASSERT(Heap::lastGCWasConservative());
2244 heapPage->setTracedAfterOrphaned();
2245 return true;
2246 }
2247 if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->terminating()))
2248 return true;
2249
2250 #if ENABLE(GC_PROFILE_MARKING)
2251 visitor->setHostInfo(item->object(), classOf(item->object()));
2252 #endif
2253 item->call(visitor);
2254 return true;
2255 }
2256
2257 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
2258 {
2259 MutexLocker locker(markingMutex());
2260 ASSERT(!Heap::orphanedPagePool()->contains(object));
2261 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
2262 *slot = CallbackStack::Item(object, callback);
2263 }
2264
2265 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
2266 {
2267 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
2268 item->call(visitor);
2269 return true;
2270 }
2271 return false;
2272 }
2273
2274 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
2275 {
2276 MutexLocker locker(markingMutex());
2277 ASSERT(!Heap::orphanedPagePool()->contains(cell));
2278 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry();
2279 *slot = CallbackStack::Item(cell, callback);
2280 }
2281
2282 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
2283 {
2284 MutexLocker locker(markingMutex());
2285 ASSERT(Heap::contains(object));
2286 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
2287 ASSERT(!heapPageForObject->orphaned());
2288 ASSERT(Heap::contains(object) == heapPageForObject);
2289 ThreadState* state = heapPageForObject->threadState();
2290 state->pushWeakObjectPointerCallback(closure, callback);
2291 }
2292
2293 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
2294 {
2295 // For weak processing we should never reach orphaned pages since orphaned
2296 // pages are not traced and thus objects on those pages are never
2297 // registered as objects on orphaned pages. We cannot assert this here since
2298 // we might have an off-heap collection. We assert it in
2299 // Heap::pushWeakObjectPointerCallback.
2300 if (CallbackStack::Item* item = s_weakCallbackStack->pop()) {
2301 item->call(visitor);
2302 return true;
2303 }
2304 return false;
2305 }
2306
2307 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
2308 {
2309 {
2310 MutexLocker locker(markingMutex());
2311 // Check that the ephemeron table being pushed onto the stack is not on an
2312 // orphaned page.
2313 ASSERT(!Heap::orphanedPagePool()->contains(table));
2314 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
2315 *slot = CallbackStack::Item(table, iterationCallback);
2316 }
2317
2318 // Register a post-marking callback to tell the tables that
2319 // ephemeron iteration is complete.
2320 pushPostMarkingCallback(table, iterationDoneCallback);
2321 }
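// The iteration callback registered above is re-run by the marking loop each
// time the ephemeron fixed point iteration makes progress, so that values
// whose keys have become reachable are marked as well; the done callback runs
// once in the post-marking phase to clear the table's iteration state.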
2322
2323 #if ENABLE(ASSERT)
2324 bool Heap::weakTableRegistered(const void* table)
2325 {
2326 MutexLocker locker(markingMutex());
2327 ASSERT(s_ephemeronStack);
2328 return s_ephemeronStack->hasCallbackForObject(table);
2329 }
2330 #endif
2331
2332 void Heap::prepareForGC()
2333 {
2334 ASSERT(ThreadState::isAnyThreadInGC());
2335 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2336 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2337 (*it)->prepareForGC();
2338 }
2339
2340 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause)
2341 {
2342 ThreadState* state = ThreadState::current();
2343 state->clearGCRequested();
2344
2345 GCScope gcScope(stackState);
2346 // Check if we successfully parked the other threads. If not we bail out of the GC.
2347 if (!gcScope.allThreadsParked()) {
2348 ThreadState::current()->setGCRequested();
2349 return;
2350 }
2351
2352 if (state->isMainThread())
2353 ScriptForbiddenScope::enter();
2354
2355 s_lastGCWasConservative = false;
2356
2357 TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
2358 "precise", stackState == ThreadState::NoHeapPointersOnStack,
2359 "forced", cause == ThreadState::ForcedGC);
2360 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
2361 double timeStamp = WTF::currentTimeMS();
2362 #if ENABLE(GC_PROFILE_MARKING)
2363 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
2364 #endif
2365
2366 // Disallow allocation during garbage collection (but not
2367 // during the finalization that happens when the gcScope is
2368 // torn down).
2369 NoAllocationScope<AnyThread> noAllocationScope;
2370
2371 prepareForGC();
2372
2373 // 1. trace persistent roots.
2374 ThreadState::visitPersistentRoots(s_markingVisitor);
2375
2376 // 2. trace objects reachable from the persistent roots including ephemerons.
2377 processMarkingStackInParallel();
2378
2379 // 3. trace objects reachable from the stack. We do this independent of the
2380 // given stackState since other threads might have a different stack state.
2381 ThreadState::visitStackRoots(s_markingVisitor);
2382
2383 // 4. trace objects reachable from the stack "roots" including ephemerons.
2384 // Only do the processing if we found a pointer to an object on one of the
2385 // thread stacks.
2386 if (lastGCWasConservative())
2387 processMarkingStackInParallel();
2388
2389 postMarkingProcessing();
2390 globalWeakProcessing();
2391
2392 // After a global marking we know that any orphaned page that was not reached
2393 // cannot be reached in a subsequent GC. This is due to a thread either having
2394 // swept its heap or having done a "poor man's sweep" in prepareForGC, which marks
2395 // objects that are dead, but not swept in the previous GC, as dead. In this GC's
2396 // marking we check that any object marked as dead is not traced, e.g. via a
2397 // conservatively found pointer or a programming error with an object containing
2398 // a dangling pointer.
2399 orphanedPagePool()->decommitOrphanedPages();
2400
2401 #if ENABLE(GC_PROFILE_MARKING)
2402 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2403 #endif
2404
2405 if (blink::Platform::current()) {
2406 uint64_t objectSpaceSize;
2407 uint64_t allocatedSpaceSize;
2408 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2409 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2410 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2411 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2412 }
2413
2414 if (state->isMainThread())
2415 ScriptForbiddenScope::exit();
2416 }
2417
2418 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2419 {
2420 // We explicitly do not enter a safepoint while doing thread specific
2421 // garbage collection since we don't want to allow a global GC at the
2422 // same time as a thread local GC.
2423
2424 {
2425 NoAllocationScope<AnyThread> noAllocationScope;
2426
2427 state->enterGC();
2428 state->prepareForGC();
2429
2430 // 1. trace the thread-local persistent roots. For thread-local GCs we
2431 // don't trace the stack (i.e. no conservative scanning) since this is
2432 // only called during thread shutdown where there should be no objects
2433 // on the stack.
2434 // We also assume that orphaned pages have no objects reachable from
2435 // persistent handles on other threads or CrossThreadPersistents. The
2436 // only cases where this could happen are a subsequent conservative
2437 // global GC finding a "pointer" on the stack, or a programming
2438 // error where an object has a dangling cross-thread pointer to an
2439 // object on this heap.
2440 state->visitPersistents(s_markingVisitor);
2441
2442 // 2. trace objects reachable from the thread's persistent roots
2443 // including ephemerons.
2444 processMarkingStack<ThreadLocalMarking>();
2445
2446 postMarkingProcessing();
2447 globalWeakProcessing();
2448
2449 state->leaveGC();
2450 }
2451 state->performPendingSweep();
2452 }
2453
2454 void Heap::processMarkingStackEntries(int* runningMarkingThreads)
2455 {
2456 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackEntries");
2457 CallbackStack stack;
2458 MarkingVisitor visitor(&stack);
2459 {
2460 MutexLocker locker(markingMutex());
2461 stack.takeBlockFrom(s_markingStack);
2462 }
2463 while (!stack.isEmpty()) {
2464 while (popAndInvokeTraceCallback<GlobalMarking>(&stack, &visitor)) { }
2465 {
2466 MutexLocker locker(markingMutex());
2467 stack.takeBlockFrom(s_markingStack);
2468 }
2469 }
2470 {
2471 MutexLocker locker(markingMutex());
2472 if (!--(*runningMarkingThreads))
2473 markingCondition().signal();
2474 }
2475 }
2476
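// The termination protocol for parallel marking: the calling thread counts
// itself as a marker (hence size() + 1), each marker decrements
// runningMarkingThreads under the marking mutex once its local stack drains
// and the global stack is empty, and the last one to finish signals the
// marking condition.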
2477 void Heap::processMarkingStackOnMultipleThreads()
2478 {
2479 int runningMarkingThreads = s_markingThreads->size() + 1;
2480
2481 for (size_t i = 0; i < s_markingThreads->size(); ++i)
2482 s_markingThreads->at(i)->postTask(new Task(WTF::bind(Heap::processMarkingStackEntries, &runningMarkingThreads)));
2483
2484 processMarkingStackEntries(&runningMarkingThreads);
2485
2486 // Wait for the other threads to finish their part of marking.
2487 MutexLocker locker(markingMutex());
2488 while (runningMarkingThreads)
2489 markingCondition().wait(markingMutex());
2490 }
2491
2492 void Heap::processMarkingStackInParallel()
2493 {
2494 static const size_t sizeOfStackForParallelMarking = 2 * CallbackStack::blockSize;
2495 // Ephemeron fixed point loop run on the garbage collecting thread.
2496 do {
2497 // Iteratively mark all objects that are reachable from the objects
2498 // currently pushed onto the marking stack. Do so in parallel if there
2499 // are multiple blocks on the global marking stack.
2500 if (s_markingStack->sizeExceeds(sizeOfStackForParallelMarking)) {
2501 processMarkingStackOnMultipleThreads();
2502 } else {
2503 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2504 while (popAndInvokeTraceCallback<GlobalMarking>(s_markingStack, s_markingVisitor)) { }
2505 }
2506
2507 // Mark any strong pointers that have now become reachable in ephemeron
2508 // maps.
2509 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2510 s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2511
2512 // Rerun loop if ephemeron processing queued more objects for tracing.
2513 } while (!s_markingStack->isEmpty());
2514 }
2515
2516 template<CallbackInvocationMode Mode>
2517 void Heap::processMarkingStack()
2518 {
2519 // Ephemeron fixed point loop.
2520 do {
2521 // Iteratively mark all objects that are reachable from the objects
2522 // currently pushed onto the marking stack. If Mode is ThreadLocalMarking
2523 // don't continue tracing if the trace hits an object on another thread's
2524 // heap.
2525 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2526 while (popAndInvokeTraceCallback<Mode>(s_markingStack, s_markingVisitor)) { }
2527
2528 // Mark any strong pointers that have now become reachable in ephemeron
2529 // maps.
2530 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2531 s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2532
2533 // Rerun loop if ephemeron processing queued more objects for tracing.
2534 } while (!s_markingStack->isEmpty());
2535 }
2536
2537 void Heap::postMarkingProcessing()
2538 {
2539 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing");
2540 // Call post-marking callbacks including:
2541 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
2542 // (specifically to clear the queued bits for weak hash tables), and
2543 // 2. the markNoTracing callbacks on collection backings to mark them
2544 // if they are only reachable from their front objects.
2545 while (popAndInvokePostMarkingCallback(s_markingVisitor)) { }
2546
2547 s_ephemeronStack->clear();
2548
2549 // Post-marking callbacks should not trace any objects and
2550 // therefore the marking stack should be empty after the
2551 // post-marking callbacks.
2552 ASSERT(s_markingStack->isEmpty());
2553 }
2554
2555 void Heap::globalWeakProcessing()
2556 {
2557 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing");
2558 // Call weak callbacks on objects that may now be pointing to dead
2559 // objects.
2560 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
2561
2562 // It is not permitted to trace pointers of live objects in the weak
2563 // callback phase, so the marking stack should still be empty here.
2564 ASSERT(s_markingStack->isEmpty());
2565 }
2566
2567 void Heap::collectAllGarbage()
2568 {
2569 // FIXME: oilpan: we should perform a single GC and everything
2570 // should die. Unfortunately that is not the case for all objects
2571 // because the hierarchy was not completely moved to the heap and
2572 // some heap-allocated objects own objects that contain persistents
2573 // pointing to other heap-allocated objects.
2574 for (int i = 0; i < 5; i++)
2575 collectGarbage(ThreadState::NoHeapPointersOnStack, ThreadState::ForcedGC);
2576 }
2577
2578 void Heap::setForcePreciseGCForTesting()
2579 {
2580 ThreadState::current()->setForcePreciseGCForTesting(true);
2581 }
2582
2583 template<typename Header>
2584 void ThreadHeap<Header>::prepareHeapForTermination()
2585 {
2586 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2587 page->setTerminating();
2588 }
2589 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
2590 current->setTerminating();
2591 }
2592 }
2593
2594 template<typename Header>
2595 BaseHeap* ThreadHeap<Header>::split(int numberOfNormalPages)
2596 {
2597 // Create a new split off thread heap containing
2598 // |numberOfNormalPages| of the pages of this ThreadHeap for
2599 // parallel sweeping. The split off thread heap will be merged
2600 // with this heap at the end of sweeping and the temporary
2601 // ThreadHeap object will be deallocated after the merge.
2602 ASSERT(numberOfNormalPages > 0);
2603 ThreadHeap<Header>* splitOff = new ThreadHeap(m_threadState, m_index);
2604 HeapPage<Header>* splitPoint = m_firstPage;
2605 for (int i = 1; i < numberOfNormalPages; i++)
2606 splitPoint = splitPoint->next();
2607 splitOff->m_firstPage = m_firstPage;
2608 m_firstPage = splitPoint->m_next;
2609 splitOff->m_mergePoint = splitPoint;
2610 splitOff->m_numberOfNormalPages = numberOfNormalPages;
2611 m_numberOfNormalPages -= numberOfNormalPages;
2612 splitPoint->m_next = 0;
2613 return splitOff;
2614 }
2615
2616 template<typename Header>
2617 void ThreadHeap<Header>::merge(BaseHeap* splitOffBase)
2618 {
2619 ThreadHeap<Header>* splitOff = static_cast<ThreadHeap<Header>*>(splitOffBase);
2620 // If the mergePoint is zero all split off pages became empty in
2621 // this round and we don't have to merge. There are no pages and
2622 // nothing on the freelists.
2623 ASSERT(splitOff->m_mergePoint || splitOff->m_numberOfNormalPages == 0);
2624 if (splitOff->m_mergePoint) {
2625 // Link the split off pages into the beginning of the list again.
2626 splitOff->m_mergePoint->m_next = m_firstPage;
2627 m_firstPage = splitOff->m_firstPage;
2628 m_numberOfNormalPages += splitOff->m_numberOfNormalPages;
2629 splitOff->m_firstPage = 0;
2630 // Merge free lists.
2631 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
2632 if (!m_freeLists[i]) {
2633 m_freeLists[i] = splitOff->m_freeLists[i];
2634 } else if (splitOff->m_freeLists[i]) {
2635 m_lastFreeListEntries[i]->append(splitOff->m_freeLists[i]);
2636 m_lastFreeListEntries[i] = splitOff->m_lastFreeListEntries[i];
2637 }
2638 }
2639 }
2640 delete splitOffBase;
2641 }
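#if 0
// Illustrative sketch (not compiled): how split() and merge() are intended to
// bracket a parallel sweep. The page count and the background sweeping task
// are hypothetical here.
BaseHeap* splitOff = heap->split(pagesToSweepInBackground);
// ... sweep |splitOff| on a sweeper thread while this thread sweeps |heap| ...
heap->merge(splitOff); // relinks the split-off pages and free lists, then deletes the temporary heap
#endif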
2642
2643 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
2644 {
2645 *objectSpaceSize = 0;
2646 *allocatedSpaceSize = 0;
2647 ASSERT(ThreadState::isAnyThreadInGC());
2648 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2649 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2650 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2651 *objectSpaceSize += (*it)->stats().totalObjectSpace();
2652 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
2653 }
2654 }
2655
2656 void Heap::getStats(HeapStats* stats)
2657 {
2658 stats->clear();
2659 ASSERT(ThreadState::isAnyThreadInGC());
2660 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2661 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2662 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2663 HeapStats temp;
2664 (*it)->getStats(temp);
2665 stats->add(&temp);
2666 }
2667 }
2668
2669 #if ENABLE(ASSERT)
2670 bool Heap::isConsistentForSweeping()
2671 {
2672 ASSERT(ThreadState::isAnyThreadInGC());
2673 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2674 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2675 if (!(*it)->isConsistentForSweeping())
2676 return false;
2677 }
2678 return true;
2679 }
2680 #endif
2681
2682 void Heap::makeConsistentForSweeping()
2683 {
2684 ASSERT(ThreadState::isAnyThreadInGC());
2685 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2686 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2687 (*it)->makeConsistentForSweeping();
2688 }
2689
2690 void HeapAllocator::backingFree(void* address)
2691 {
2692 if (!address || ThreadState::isAnyThreadInGC())
2693 return;
2694
2695 ThreadState* state = ThreadState::current();
2696 if (state->isSweepInProgress())
2697 return;
2698
2699 // Don't promptly free large objects because their page is never reused,
2700 // and don't free backings allocated on other threads.
2701 BaseHeapPage* page = pageHeaderFromObject(address);
2702 if (page->isLargeObject() || page->threadState() != state)
2703 return;
2704
2705 typedef HeapIndexTrait<CollectionBackingHeap> HeapTraits;
2706 typedef HeapTraits::HeapType HeapType;
2707 typedef HeapTraits::HeaderType HeaderType;
2708
2709 HeaderType* header = HeaderType::fromPayload(address);
2710 header->checkHeader();
2711
2712 const GCInfo* gcInfo = header->gcInfo();
2713 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
2714 HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex));
2715 heap->promptlyFreeObject(header);
2716 }
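#if 0
// Illustrative sketch (not compiled): prompt freeing of a collection backing
// store. When a heap-allocated vector reallocates, the old backing can be
// returned to its heap's free list immediately via backingFree() instead of
// waiting for the next GC. HeapVector and reserveCapacity() are used here as
// an assumed example of a collection whose backing lives on the collection
// backing heap.
HeapVector<int> vector;
vector.reserveCapacity(100);  // allocates a backing store
vector.reserveCapacity(1000); // the old backing is promptly freed via HeapAllocator::backingFree()
#endif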
2717
2718 // Force template instantiations for the types that we need.
2719 template class HeapPage<FinalizedHeapObjectHeader>;
2720 template class HeapPage<HeapObjectHeader>;
2721 template class ThreadHeap<FinalizedHeapObjectHeader>;
2722 template class ThreadHeap<HeapObjectHeader>;
2723
2724 Visitor* Heap::s_markingVisitor;
2725 Vector<OwnPtr<blink::WebThread> >* Heap::s_markingThreads;
2726 CallbackStack* Heap::s_markingStack;
2727 CallbackStack* Heap::s_postMarkingCallbackStack;
2728 CallbackStack* Heap::s_weakCallbackStack;
2729 CallbackStack* Heap::s_ephemeronStack;
2730 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2731 bool Heap::s_shutdownCalled = false;
2732 bool Heap::s_lastGCWasConservative = false;
2733 FreePagePool* Heap::s_freePagePool;
2734 OrphanedPagePool* Heap::s_orphanedPagePool;
2735 } // namespace blink
2736