// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
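
// Editorial note (not part of the original file): the patterns above encode an
// object's color in two adjacent mark bits.  White objects are unvisited, grey
// objects are marked but have unvisited children, and black objects are marked
// with all children visited.  A minimal decoding sketch, assuming the bit
// order implied by the pattern strings and a hypothetical helper:
//
//   ObjectColor ColorFromBits(bool first_bit, bool second_bit) {
//     if (first_bit) return second_bit ? GREY_OBJECT : BLACK_OBJECT;  // "11"/"10"
//     return second_bit ? IMPOSSIBLE_COLOR : WHITE_OBJECT;            // "01"/"00"
//   }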


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      compacting_(false),
      was_marked_incrementally_(false),
      collect_maps_(FLAG_collect_maps),
      flush_monomorphic_ics_(false),
      tracer_(NULL),
      migration_slots_buffer_(NULL),
      heap_(NULL),
      code_flusher_(NULL),
      encountered_weak_maps_(NULL) { }


#ifdef DEBUG
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
      }
    }
  }
};


static void VerifyMarking(Address bottom, Address top) {
  VerifyMarkingVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page, which allows us to use
  // page->area_start() as the start of the range on all pages.
  ASSERT_EQ(space->bottom(),
            NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    ASSERT(limit == end || !page->Contains(end));
    VerifyMarking(page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor;

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    ASSERT(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (collect_maps_) ClearNonLiveTransitions();

  ClearWeakMaps();

#ifdef DEBUG
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  if (!collect_maps_) ReattachInitialMaps();

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  Finish();

  tracer_ = NULL;
}


#ifdef DEBUG
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    ASSERT(Marking::IsWhite(mark_bit));
  }
}
#endif


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }
}


bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return true;
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif

  return false;
}
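
// Editorial note (not part of the original file): TransferMark is used when an
// object is resized in place, e.g. when a FixedArray is left-trimmed so that
// the surviving object starts at a different address on the same page.  A
// black mark simply moves to the new start; a grey object is instead re-pushed
// onto the incremental marking deque via WhiteToGreyAndPush so it is visited
// again from its new address.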


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}

// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them.  Otherwise returns a positive integer that gives
// an estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  FreeList::SizeStats sizes;
  space->CountFreeListItems(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}
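
// Editorial worked example (not part of the original file), using the
// non-code-space branch above with round numbers: for area_size = 1,000,000
// bytes, small_size_ = 100,000 and medium_size_ = 50,000,
//   ratio = (100,000 * 5 + 50,000) * 100 / 1,000,000 = 55
// which exceeds ratio_threshold = 15, so the page gets a fragmentation score
// of 55 - 15 = 40 and becomes a plausible evacuation candidate.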


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();

  const int kMaxMaxEvacuationCandidates = 1000;
  int max_evacuation_candidates = Min(
    kMaxMaxEvacuationCandidates,
    static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (over_reserved >= 2 * space->AreaSize() &&
      reduce_memory_footprint_) {
    mode = REDUCE_MEMORY_FOOTPRINT;

    // We expect that empty pages are easier to compact so slightly bump the
    // limit.
    max_evacuation_candidates += 2;

    if (FLAG_trace_fragmentation) {
      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
             static_cast<double>(over_reserved) / MB,
             static_cast<int>(kFreenessThreshold));
    }
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= ((over_reserved * 3) / 4)) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        FreeList::SizeStats sizes;
        space->CountFreeListItems(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += 2 * p->area_size() - free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}
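
// Editorial note (not part of the original file): the candidate cap computed
// above grows with the square root of the space size.  For example, a space
// with 200 pages allows sqrt(200 / 2) + 1 = 11 candidates, clamped to
// kMaxMaxEvacuationCandidates = 1000.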


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Disable collection of maps if incremental marking is enabled.
  // Map collection algorithm relies on a special map transition tree traversal
  // order which is not implemented for incremental marking.
  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;

  // Monomorphic ICs are preserved when possible, but need to be flushed
  // when they might be keeping a Context alive, or when the heap is about
  // to be serialized.
  flush_monomorphic_ics_ =
      heap()->isolate()->context_exit_happened() || Serializer::enabled();

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (collect_maps_) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) {
    // If GDBJIT interface is active disable compaction.
    compacting_collection_ = false;
  }
#endif

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef DEBUG
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  heap()->isolate()->stub_cache()->Clear();

  heap()->external_string_table_.CleanUp();
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.
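
// Editorial sketch (not part of the original file) of the overflow-tolerant
// driver loop described above; the helper names are illustrative, not the
// actual routines used elsewhere in this file:
//
//   for (;;) {
//     DrainMarkingStack();              // visit children; may set overflow
//     if (!marking_stack_overflowed()) break;
//     clear_overflow_flag();
//     ScanHeapForOverflowedObjects();   // re-push overflowed objects
//   }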

class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());

    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;
  }

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

 private:
  void ProcessJSFunctionCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    JSFunction* candidate = jsfunction_candidates_head_;
    JSFunction* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      SharedFunctionInfo* shared = candidate->shared();

      Code* code = shared->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        shared->set_code(lazy_compile);
        candidate->set_code(lazy_compile);
      } else {
        candidate->set_code(shared->code());
      }

      // We are in the middle of a GC cycle so the write barrier in the code
      // setter did not record the slot update and we have to do that manually.
      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
      isolate_->heap()->mark_compact_collector()->
          RecordCodeEntrySlot(slot, target);

      RecordSharedFunctionInfoCodeSlot(shared);

      candidate = next_candidate;
    }

    jsfunction_candidates_head_ = NULL;
  }


  void ProcessSharedFunctionInfoCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    SharedFunctionInfo* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);
      SetNextCandidate(candidate, NULL);

      Code* code = candidate->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        candidate->set_code(lazy_compile);
      }

      RecordSharedFunctionInfoCodeSlot(candidate);

      candidate = next_candidate;
    }

    shared_function_info_candidates_head_ = NULL;
  }

  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
    Object** slot = HeapObject::RawField(shared,
                                         SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(slot, slot, HeapObject::cast(*slot));
  }

  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  static SharedFunctionInfo** GetNextCandidateField(
      SharedFunctionInfo* candidate) {
    Code* code = candidate->code();
    return reinterpret_cast<SharedFunctionInfo**>(
        code->address() + Code::kGCMetadataOffset);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    return reinterpret_cast<SharedFunctionInfo*>(
        candidate->code()->gc_metadata());
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
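
// Editorial note (not part of the original file): the candidate lists above
// are threaded through the objects themselves instead of side storage.  A
// JSFunction's "next" pointer temporarily reuses the code entry field at
// JSFunction::kCodeEntryOffset, and a SharedFunctionInfo's lives in the
// gc_metadata slot of its Code object.  ProcessCandidates() rewrites these
// fields with real code (or the lazy-compile stub) at the end of marking.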


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring.  Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
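
// Editorial example (not part of the original file): for a slot holding
// ConsString("foo", ""), the slot is rewritten to point directly at "foo", so
// the marker never needs to retain the cons wrapper.  The InNewSpace check
// above avoids creating an old-to-new pointer that the page dirty marks
// would miss.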


class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitSlicedString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SlicedString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitGlobalContext, &VisitGlobalContext);

    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitSharedFunctionInfo,
                    &VisitSharedFunctionInfoAndFlushCode);

    table_.Register(kVisitJSFunction,
                    &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitJSRegExp,
                    &VisitRegExpAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);
    }
  }

  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
    JSGlobalPropertyCell* cell =
        JSGlobalPropertyCell::cast(rinfo->target_cell());
    MarkBit mark = Marking::MarkBitFrom(cell);
    heap->mark_compact_collector()->MarkObject(cell, mark);
  }

  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    // TODO(mstarzinger): We do not short-circuit cons strings here, verify
    // that there can be no such embedded pointers and add assertion here.
    HeapObject* object = HeapObject::cast(rinfo->target_object());
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_state() == MEGAMORPHIC ||
            heap->mark_compact_collector()->flush_monomorphic_ics_ ||
            target->ic_age() != heap->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    MarkBit code_mark = Marking::MarkBitFrom(target);
    heap->mark_compact_collector()->MarkObject(target, code_mark);
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
  }

  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkBit code_mark = Marking::MarkBitFrom(target);
    heap->mark_compact_collector()->MarkObject(target, code_mark);
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot,
                                         Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    ASSERT(Isolate::Current()->heap()->Contains(obj));
    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitExternalReference(RelocInfo* rinfo) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);

    // Enqueue weak map in linked list of encountered weak maps.
    ASSERT(weak_map->next() == Smi::FromInt(0));
    weak_map->set_next(collector->encountered_weak_maps());
    collector->set_encountered_weak_maps(weak_map);

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakMap::BodyDescriptor::kStartOffset,
        JSWeakMap::kTableOffset);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakMap::kTableOffset + kPointerSize,
        object_size);

    // Mark the backing hash table without pushing it on the marking stack.
    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
    ASSERT(!MarkCompactCollector::IsMarked(table));
    collector->SetMark(table, Marking::MarkBitFrom(table));
    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
    ASSERT(MarkCompactCollector::IsMarked(table->map()));
  }

  static void VisitCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    Code* code = reinterpret_cast<Code*>(object);
    if (FLAG_cleanup_code_caches_at_gc) {
      Object* raw_info = code->type_feedback_info();
      if (raw_info->IsTypeFeedbackInfo()) {
        TypeFeedbackCells* type_feedback_cells =
            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
        }
      }
    }
    code->CodeIterateBody<StaticMarkingVisitor>(heap);
  }

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  static const int kRegExpCodeThreshold = 5;

  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
    Object* undefined = heap->undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return function->code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return function->code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    MarkBit code_mark = Marking::MarkBitFrom(function->code());
    if (code_mark.Get()) {
      if (!Marking::MarkBitFrom(shared_info).Get()) {
        shared_info->set_code_age(0);
      }
      return false;
    }

    // We do not flush code for optimized functions.
    if (function->code() != shared_info->code()) {
      return false;
    }

    return IsFlushable(heap, shared_info);
  }

  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    MarkBit code_mark =
        Marking::MarkBitFrom(shared_info->code());
    if (code_mark.Get()) {
      return false;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
      return false;
    }

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsFunctionTemplateInfo()) {
      return false;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) {
      return false;
    }

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) {
      return false;
    }

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) {
      return false;
    }

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return false;
    }

    return true;
  }


  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
    if (!IsFlushable(heap, function)) return false;

    // This function's code looks flushable. But we have to postpone the
    // decision until we see all functions that point to the same
    // SharedFunctionInfo because some of them might be optimized.
    // That would make the nonoptimized version of the code nonflushable,
    // because it is required for bailing out from optimized code.
    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
    return true;
  }

  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    return ctx->IsContext() &&
        !Context::cast(ctx)->global()->IsJSBuiltinsObject();
  }


  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    FixedBodyVisitor<StaticMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }


  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
            FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                             code,
                             heap);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker.  This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
      heap->mark_compact_collector()->
          RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                             Smi::FromInt(heap->sweep_generation() & 0xff),
                             heap);
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
      }
    }
  }


  // Works by setting the current sweep_generation (as a smi) in the place of
  // the code object in the RegExp's data array, and keeps a copy around that
  // can be reinstated if we reuse the RegExp before flushing.  If we have not
  // used the code for kRegExpCodeThreshold mark-sweep GCs, we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExpFields(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ASCII and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExpFields(map, object);
  }
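
  // Editorial worked example (not part of the original file): suppose
  // sweep_generation() is 42 when compiled Irregexp code is first seen.
  // UpdateRegExpCodeAgeAndFlush moves the code to the saved-code slot and
  // stores Smi(42 & 0xff) in its place.  If the RegExp stays unused until
  // sweep_generation() reaches 47, then (47 - kRegExpCodeThreshold) & 0xff
  // == 42 matches the stored value and both slots are reset to
  // kUninitializedValue, flushing the code.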
1389 
1390 
VisitSharedFunctionInfoAndFlushCode(Map * map,HeapObject * object)1391   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
1392                                                   HeapObject* object) {
1393     MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1394     if (!collector->is_code_flushing_enabled()) {
1395       VisitSharedFunctionInfoGeneric(map, object);
1396       return;
1397     }
1398     VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
1399   }
1400 
1401 
1402   static void VisitSharedFunctionInfoAndFlushCodeGeneric(
1403       Map* map, HeapObject* object, bool known_flush_code_candidate) {
1404     Heap* heap = map->GetHeap();
1405     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
1406 
1407     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
1408 
1409     if (shared->ic_age() != heap->global_ic_age()) {
1410       shared->ResetForNewContext(heap->global_ic_age());
1411     }
1412 
1413     if (!known_flush_code_candidate) {
1414       known_flush_code_candidate = IsFlushable(heap, shared);
1415       if (known_flush_code_candidate) {
1416         heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
1417       }
1418     }
1419 
1420     VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
1421   }
1422 
1423 
1424   static void VisitCodeEntry(Heap* heap, Address entry_address) {
1425     Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1426     MarkBit mark = Marking::MarkBitFrom(code);
1427     heap->mark_compact_collector()->MarkObject(code, mark);
1428     heap->mark_compact_collector()->
1429         RecordCodeEntrySlot(entry_address, code);
1430   }
1431 
1432   static void VisitGlobalContext(Map* map, HeapObject* object) {
1433     FixedBodyVisitor<StaticMarkingVisitor,
1434                      Context::MarkCompactBodyDescriptor,
1435                      void>::Visit(map, object);
1436 
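         // The body visitor above marks through only the strong part of the
         // context.  The weak slots below are deliberately left unmarked;
         // recording them merely lets the compactor update the pointers if
         // their targets move, so the references stay weak.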
1437     MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1438     for (int idx = Context::FIRST_WEAK_SLOT;
1439          idx < Context::GLOBAL_CONTEXT_SLOTS;
1440          ++idx) {
1441       Object** slot =
1442           HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
1443       collector->RecordSlot(slot, slot, *slot);
1444     }
1445   }
1446 
1447   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
1448     Heap* heap = map->GetHeap();
1449     MarkCompactCollector* collector = heap->mark_compact_collector();
1450     if (!collector->is_code_flushing_enabled()) {
1451       VisitJSFunction(map, object);
1452       return;
1453     }
1454 
1455     JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
1456     // The function must have a valid context and not be a builtin.
1457     bool flush_code_candidate = false;
1458     if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
1459       flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
1460     }
1461 
1462     if (!flush_code_candidate) {
1463       Code* code = jsfunction->shared()->code();
1464       MarkBit code_mark = Marking::MarkBitFrom(code);
1465       collector->MarkObject(code, code_mark);
1466 
1467       if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
1468         collector->MarkInlinedFunctionsCode(jsfunction->code());
1469       }
1470     }
1471 
1472     VisitJSFunctionFields(map,
1473                           reinterpret_cast<JSFunction*>(object),
1474                           flush_code_candidate);
1475   }
1476 
1477 
1478   static void VisitJSFunction(Map* map, HeapObject* object) {
1479     VisitJSFunctionFields(map,
1480                           reinterpret_cast<JSFunction*>(object),
1481                           false);
1482   }
1483 
1484 
1485 #define SLOT_ADDR(obj, offset) \
1486   reinterpret_cast<Object**>((obj)->address() + offset)
1487 
1488 
1489   static inline void VisitJSFunctionFields(Map* map,
1490                                            JSFunction* object,
1491                                            bool flush_code_candidate) {
1492     Heap* heap = map->GetHeap();
1493 
1494     VisitPointers(heap,
1495                   HeapObject::RawField(object, JSFunction::kPropertiesOffset),
1496                   HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
1497 
1498     if (!flush_code_candidate) {
1499       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
1500     } else {
1501       // Don't visit code object.
1502 
1503       // Visit the shared function info to avoid double-checking its
1504       // flushability.
1505       SharedFunctionInfo* shared_info = object->unchecked_shared();
1506       MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
1507       if (!shared_info_mark.Get()) {
1508         Map* shared_info_map = shared_info->map();
1509         MarkBit shared_info_map_mark =
1510             Marking::MarkBitFrom(shared_info_map);
1511         heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
1512         heap->mark_compact_collector()->MarkObject(shared_info_map,
1513                                                    shared_info_map_mark);
1514         VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
1515                                                    shared_info,
1516                                                    true);
1517       }
1518     }
1519 
1520     VisitPointers(
1521         heap,
1522         HeapObject::RawField(object,
1523                              JSFunction::kCodeEntryOffset + kPointerSize),
1524         HeapObject::RawField(object,
1525                              JSFunction::kNonWeakFieldsEndOffset));
1526 
1527     // Don't visit the next function list field as it is a weak reference.
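         // Recording the slot without marking through it lets the compactor
         // update the pointer if the next function is evacuated, while still
         // treating the reference as weak.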
1528     Object** next_function =
1529         HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
1530     heap->mark_compact_collector()->RecordSlot(
1531         next_function, next_function, *next_function);
1532   }
1533 
1534   static inline void VisitJSRegExpFields(Map* map,
1535                                          HeapObject* object) {
1536     int last_property_offset =
1537         JSRegExp::kSize + kPointerSize * map->inobject_properties();
1538     VisitPointers(map->GetHeap(),
1539                   SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
1540                   SLOT_ADDR(object, last_property_offset));
1541   }
1542 
1543 
1544   static void VisitSharedFunctionInfoFields(Heap* heap,
1545                                             HeapObject* object,
1546                                             bool flush_code_candidate) {
1547     VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
1548 
1549     if (!flush_code_candidate) {
1550       VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
1551     }
1552 
1553     VisitPointers(heap,
1554                   SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
1555                   SLOT_ADDR(object, SharedFunctionInfo::kSize));
1556   }
1557 
1558   #undef SLOT_ADDR
1559 
1560   typedef void (*Callback)(Map* map, HeapObject* object);
1561 
1562   static VisitorDispatchTable<Callback> table_;
1563 };
1564 
1565 
1566 VisitorDispatchTable<StaticMarkingVisitor::Callback>
1567   StaticMarkingVisitor::table_;
1568 
1569 
1570 class MarkingVisitor : public ObjectVisitor {
1571  public:
1572   explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
1573 
1574   void VisitPointer(Object** p) {
1575     StaticMarkingVisitor::VisitPointer(heap_, p);
1576   }
1577 
1578   void VisitPointers(Object** start, Object** end) {
1579     StaticMarkingVisitor::VisitPointers(heap_, start, end);
1580   }
1581 
1582  private:
1583   Heap* heap_;
1584 };
1585 
1586 
1587 class CodeMarkingVisitor : public ThreadVisitor {
1588  public:
1589   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1590       : collector_(collector) {}
1591 
1592   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1593     collector_->PrepareThreadForCodeFlushing(isolate, top);
1594   }
1595 
1596  private:
1597   MarkCompactCollector* collector_;
1598 };
1599 
1600 
1601 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1602  public:
1603   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1604       : collector_(collector) {}
1605 
1606   void VisitPointers(Object** start, Object** end) {
1607     for (Object** p = start; p < end; p++) VisitPointer(p);
1608   }
1609 
1610   void VisitPointer(Object** slot) {
1611     Object* obj = *slot;
1612     if (obj->IsSharedFunctionInfo()) {
1613       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1614       MarkBit shared_mark = Marking::MarkBitFrom(shared);
1615       MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1616       collector_->MarkObject(shared->code(), code_mark);
1617       collector_->MarkObject(shared, shared_mark);
1618     }
1619   }
1620 
1621  private:
1622   MarkCompactCollector* collector_;
1623 };
1624 
1625 
1626 void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
1627   // For optimized functions we should retain both the non-optimized
1628   // version of its code and the non-optimized versions of all inlined
1629   // functions.  This is required to support bailing out from inlined code.
1630   DeoptimizationInputData* data =
1631       DeoptimizationInputData::cast(code->deoptimization_data());
1632 
1633   FixedArray* literals = data->LiteralArray();
1634 
1635   for (int i = 0, count = data->InlinedFunctionCount()->value();
1636        i < count;
1637        i++) {
1638     JSFunction* inlined = JSFunction::cast(literals->get(i));
1639     Code* inlined_code = inlined->shared()->code();
1640     MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
1641     MarkObject(inlined_code, inlined_code_mark);
1642   }
1643 }
1644 
1645 
1646 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1647                                                         ThreadLocalTop* top) {
1648   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1649     // Note: for a frame that has a pending lazy deoptimization,
1650     // StackFrame::unchecked_code will return a non-optimized code object
1651     // for the outermost function, while StackFrame::LookupCode will return
1652     // the actual optimized code object.
1653     StackFrame* frame = it.frame();
1654     Code* code = frame->unchecked_code();
1655     MarkBit code_mark = Marking::MarkBitFrom(code);
1656     MarkObject(code, code_mark);
1657     if (frame->is_optimized()) {
1658       MarkInlinedFunctionsCode(frame->LookupCode());
1659     }
1660   }
1661 }
1662 
1663 
1664 void MarkCompactCollector::PrepareForCodeFlushing() {
1665   ASSERT(heap() == Isolate::Current()->heap());
1666 
1667   // TODO(1609) Currently incremental marker does not support code flushing.
1668   if (!FLAG_flush_code || was_marked_incrementally_) {
1669     EnableCodeFlushing(false);
1670     return;
1671   }
1672 
1673 #ifdef ENABLE_DEBUGGER_SUPPORT
1674   if (heap()->isolate()->debug()->IsLoaded() ||
1675       heap()->isolate()->debug()->has_break_points()) {
1676     EnableCodeFlushing(false);
1677     return;
1678   }
1679 #endif
1680 
1681   EnableCodeFlushing(true);
1682 
1683   // Ensure that the empty descriptor array is marked.  MarkDescriptorArray
1684   // relies on it being marked before any other descriptor array.
1685   HeapObject* descriptor_array = heap()->empty_descriptor_array();
1686   MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1687   MarkObject(descriptor_array, descriptor_array_mark);
1688 
1689   // Make sure we are not referencing the code from the stack.
1690   ASSERT(this == heap()->mark_compact_collector());
1691   PrepareThreadForCodeFlushing(heap()->isolate(),
1692                                heap()->isolate()->thread_local_top());
1693 
1694   // Iterate the archived stacks in all threads to check if
1695   // the code is referenced.
1696   CodeMarkingVisitor code_marking_visitor(this);
1697   heap()->isolate()->thread_manager()->IterateArchivedThreads(
1698       &code_marking_visitor);
1699 
1700   SharedFunctionInfoMarkingVisitor visitor(this);
1701   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1702   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1703 
1704   ProcessMarkingDeque();
1705 }
1706 
1707 
1708 // Visitor class for marking heap roots.
1709 class RootMarkingVisitor : public ObjectVisitor {
1710  public:
1711   explicit RootMarkingVisitor(Heap* heap)
1712     : collector_(heap->mark_compact_collector()) { }
1713 
1714   void VisitPointer(Object** p) {
1715     MarkObjectByPointer(p);
1716   }
1717 
1718   void VisitPointers(Object** start, Object** end) {
1719     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1720   }
1721 
1722  private:
1723   void MarkObjectByPointer(Object** p) {
1724     if (!(*p)->IsHeapObject()) return;
1725 
1726     // Replace flat cons strings in place.
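         // ShortCircuitConsString rewrites a cons string whose second part is
         // the empty string to point directly at its first part, so later
         // reads skip the indirection.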
1727     HeapObject* object = ShortCircuitConsString(p);
1728     MarkBit mark_bit = Marking::MarkBitFrom(object);
1729     if (mark_bit.Get()) return;
1730 
1731     Map* map = object->map();
1732     // Mark the object.
1733     collector_->SetMark(object, mark_bit);
1734 
1735     // Mark the map pointer and body, and push them on the marking stack.
1736     MarkBit map_mark = Marking::MarkBitFrom(map);
1737     collector_->MarkObject(map, map_mark);
1738     StaticMarkingVisitor::IterateBody(map, object);
1739 
1740     // Mark all the objects reachable from the map and body.  May leave
1741     // overflowed objects in the heap.
1742     collector_->EmptyMarkingDeque();
1743   }
1744 
1745   MarkCompactCollector* collector_;
1746 };
1747 
1748 
1749 // Helper class for pruning the symbol table.
1750 class SymbolTableCleaner : public ObjectVisitor {
1751  public:
1752   explicit SymbolTableCleaner(Heap* heap)
1753     : heap_(heap), pointers_removed_(0) { }
1754 
1755   virtual void VisitPointers(Object** start, Object** end) {
1756     // Visit all HeapObject pointers in [start, end).
1757     for (Object** p = start; p < end; p++) {
1758       Object* o = *p;
1759       if (o->IsHeapObject() &&
1760           !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1761         // Check if the symbol being pruned is an external symbol. We need to
1762         // delete the associated external data as this symbol is going away.
1763 
1764         // Since no objects have yet been moved we can safely access the map of
1765         // the object.
1766         if (o->IsExternalString()) {
1767           heap_->FinalizeExternalString(String::cast(*p));
1768         }
1769         // Set the entry to the_hole_value (as deleted).
1770         *p = heap_->the_hole_value();
1771         pointers_removed_++;
1772       }
1773     }
1774   }
1775 
1776   int PointersRemoved() {
1777     return pointers_removed_;
1778   }
1779 
1780  private:
1781   Heap* heap_;
1782   int pointers_removed_;
1783 };
1784 
1785 
1786 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1787 // are retained.
1788 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1789  public:
1790   virtual Object* RetainAs(Object* object) {
1791     if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1792       return object;
1793     } else {
1794       return NULL;
1795     }
1796   }
1797 };
1798 
1799 
1800 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
1801   ASSERT(IsMarked(object));
1802   ASSERT(HEAP->Contains(object));
1803   if (object->IsMap()) {
1804     Map* map = Map::cast(object);
1805     heap_->ClearCacheOnMap(map);
1806 
1807     // When map collection is enabled we have to mark through the map's
1808     // transitions in a special way to make the transition links weak.
1809     // Only maps for subclasses of JSReceiver can have transitions.
1810     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1811     if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1812       MarkMapContents(map);
1813     } else {
1814       marking_deque_.PushBlack(map);
1815     }
1816   } else {
1817     marking_deque_.PushBlack(object);
1818   }
1819 }
1820 
1821 
1822 void MarkCompactCollector::MarkMapContents(Map* map) {
1823   // Mark the prototype transitions array but don't push it onto the marking
1824   // stack.  This keeps the references from it weak; dead prototype
1825   // transitions are cleaned up later in ClearNonLiveTransitions.
1826   FixedArray* prototype_transitions = map->prototype_transitions();
1827   MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
1828   if (!mark.Get()) {
1829     mark.Set();
1830     MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
1831                                           prototype_transitions->Size());
1832   }
1833 
1834   Object** raw_descriptor_array_slot =
1835       HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
1836   Object* raw_descriptor_array = *raw_descriptor_array_slot;
1837   if (!raw_descriptor_array->IsSmi()) {
1838     MarkDescriptorArray(
1839         reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
1840   }
1841 
1842   // Mark the Object* fields of the Map.
1843   // Since the descriptor array has been marked already, it is fine
1844   // that one of these fields contains a pointer to it.
1845   Object** start_slot = HeapObject::RawField(map,
1846                                              Map::kPointerFieldsBeginOffset);
1847 
1848   Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1849 
1850   StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
1851 }
1852 
1853 
1854 void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
1855                                                 int offset) {
1856   Object** slot = HeapObject::RawField(accessors, offset);
1857   HeapObject* accessor = HeapObject::cast(*slot);
1858   if (accessor->IsMap()) return;
1859   RecordSlot(slot, slot, accessor);
1860   MarkObjectAndPush(accessor);
1861 }
1862 
1863 
1864 void MarkCompactCollector::MarkDescriptorArray(
1865     DescriptorArray* descriptors) {
1866   MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
1867   if (descriptors_mark.Get()) return;
1868   // Empty descriptor array is marked as a root before any maps are marked.
1869   ASSERT(descriptors != heap()->empty_descriptor_array());
1870   SetMark(descriptors, descriptors_mark);
1871 
1872   FixedArray* contents = reinterpret_cast<FixedArray*>(
1873       descriptors->get(DescriptorArray::kContentArrayIndex));
1874   ASSERT(contents->IsHeapObject());
1875   ASSERT(!IsMarked(contents));
1876   ASSERT(contents->IsFixedArray());
1877   ASSERT(contents->length() >= 2);
1878   MarkBit contents_mark = Marking::MarkBitFrom(contents);
1879   SetMark(contents, contents_mark);
1880   // Contents contains (value, details) pairs.  If the details say that the type
1881   // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1882   // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1883   // live.  Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1884   // CONSTANT_TRANSITION is the value an Object* (a Map*).
1885   for (int i = 0; i < contents->length(); i += 2) {
1886     // If the pair (value, details) at index i, i+1 is not
1887     // a transition or null descriptor, mark the value.
1888     PropertyDetails details(Smi::cast(contents->get(i + 1)));
1889 
1890     Object** slot = contents->data_start() + i;
1891     if (!(*slot)->IsHeapObject()) continue;
1892     HeapObject* value = HeapObject::cast(*slot);
1893 
1894     RecordSlot(slot, slot, *slot);
1895 
1896     switch (details.type()) {
1897       case NORMAL:
1898       case FIELD:
1899       case CONSTANT_FUNCTION:
1900       case HANDLER:
1901       case INTERCEPTOR:
1902         MarkObjectAndPush(value);
1903         break;
1904       case CALLBACKS:
1905         if (!value->IsAccessorPair()) {
1906           MarkObjectAndPush(value);
1907         } else if (!MarkObjectWithoutPush(value)) {
1908           MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
1909           MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
1910         }
1911         break;
1912       case ELEMENTS_TRANSITION:
1913         // For maps with multiple elements transitions, the transition maps are
1914         // stored in a FixedArray. Keep the fixed array alive but not the maps
1915         // that it refers to.
1916         if (value->IsFixedArray()) MarkObjectWithoutPush(value);
1917         break;
1918       case MAP_TRANSITION:
1919       case CONSTANT_TRANSITION:
1920       case NULL_DESCRIPTOR:
1921         break;
1922     }
1923   }
1924   // The DescriptorArray descriptors contains a pointer to its contents array,
1925   // but the contents array is already marked.
1926   marking_deque_.PushBlack(descriptors);
1927 }
1928 
1929 
1930 void MarkCompactCollector::CreateBackPointers() {
1931   HeapObjectIterator iterator(heap()->map_space());
1932   for (HeapObject* next_object = iterator.Next();
1933        next_object != NULL; next_object = iterator.Next()) {
1934     if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
1935       Map* map = Map::cast(next_object);
1936       STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1937       if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1938         map->CreateBackPointers();
1939       } else {
1940         ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
1941       }
1942     }
1943   }
1944 }
1945 
1946 
1947 // Fill the marking stack with overflowed objects returned by the given
1948 // iterator.  Stop when the marking stack is filled or the end of the space
1949 // is reached, whichever comes first.
1950 template<class T>
1951 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1952                                             MarkingDeque* marking_deque,
1953                                             T* it) {
1954   // The caller should ensure that the marking stack is initially not full,
1955   // so that we don't waste effort pointlessly scanning for objects.
1956   ASSERT(!marking_deque->IsFull());
1957 
1958   Map* filler_map = heap->one_pointer_filler_map();
1959   for (HeapObject* object = it->Next();
1960        object != NULL;
1961        object = it->Next()) {
1962     MarkBit markbit = Marking::MarkBitFrom(object);
1963     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1964       Marking::GreyToBlack(markbit);
1965       MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1966       marking_deque->PushBlack(object);
1967       if (marking_deque->IsFull()) return;
1968     }
1969   }
1970 }
1971 
1972 
1973 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1974 
1975 
1976 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
1977   ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1978   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1979   ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1980   ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1981 
1982   MarkBit::CellType* cells = p->markbits()->cells();
1983 
1984   int last_cell_index =
1985       Bitmap::IndexToCell(
1986           Bitmap::CellAlignIndex(
1987               p->AddressToMarkbitIndex(p->area_end())));
1988 
1989   Address cell_base = p->area_start();
1990   int cell_index = Bitmap::IndexToCell(
1991           Bitmap::CellAlignIndex(
1992               p->AddressToMarkbitIndex(cell_base)));
1993 
1994 
1995   for (;
1996        cell_index < last_cell_index;
1997        cell_index++, cell_base += 32 * kPointerSize) {
1998     ASSERT((unsigned)cell_index ==
1999         Bitmap::IndexToCell(
2000             Bitmap::CellAlignIndex(
2001                 p->AddressToMarkbitIndex(cell_base))));
2002 
2003     const MarkBit::CellType current_cell = cells[cell_index];
2004     if (current_cell == 0) continue;
2005 
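         // Each object owns two consecutive mark bits, and grey is the
         // pattern "11".  Shifting the cell right by one (pulling in the low
         // bit of the next cell at the top) aligns every bit with its
         // successor, so the AND below leaves a set bit exactly at the first
         // mark bit of each grey object in this cell.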
2006     const MarkBit::CellType next_cell = cells[cell_index + 1];
2007     MarkBit::CellType grey_objects = current_cell &
2008         ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
2009 
2010     int offset = 0;
2011     while (grey_objects != 0) {
2012       int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
2013       grey_objects >>= trailing_zeros;
2014       offset += trailing_zeros;
2015       MarkBit markbit(&cells[cell_index], 1 << offset, false);
2016       ASSERT(Marking::IsGrey(markbit));
2017       Marking::GreyToBlack(markbit);
2018       Address addr = cell_base + offset * kPointerSize;
2019       HeapObject* object = HeapObject::FromAddress(addr);
2020       MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
2021       marking_deque->PushBlack(object);
2022       if (marking_deque->IsFull()) return;
2023       offset += 2;
2024       grey_objects >>= 2;
2025     }
2026 
2027     grey_objects >>= (Bitmap::kBitsPerCell - 1);
2028   }
2029 }
2030 
2031 
2032 static void DiscoverGreyObjectsInSpace(Heap* heap,
2033                                        MarkingDeque* marking_deque,
2034                                        PagedSpace* space) {
2035   if (!space->was_swept_conservatively()) {
2036     HeapObjectIterator it(space);
2037     DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2038   } else {
2039     PageIterator it(space);
2040     while (it.has_next()) {
2041       Page* p = it.next();
2042       DiscoverGreyObjectsOnPage(marking_deque, p);
2043       if (marking_deque->IsFull()) return;
2044     }
2045   }
2046 }
2047 
2048 
2049 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2050   Object* o = *p;
2051   if (!o->IsHeapObject()) return false;
2052   HeapObject* heap_object = HeapObject::cast(o);
2053   MarkBit mark = Marking::MarkBitFrom(heap_object);
2054   return !mark.Get();
2055 }
2056 
2057 
2058 void MarkCompactCollector::MarkSymbolTable() {
2059   SymbolTable* symbol_table = heap()->symbol_table();
2060   // Mark the symbol table itself.
2061   MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
2062   SetMark(symbol_table, symbol_table_mark);
2063   // Explicitly mark the prefix.
2064   MarkingVisitor marker(heap());
2065   symbol_table->IteratePrefix(&marker);
2066   ProcessMarkingDeque();
2067 }
2068 
2069 
2070 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2071   // Mark the heap roots including global variables, stack variables,
2072   // etc., and all objects reachable from them.
2073   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2074 
2075   // Handle the symbol table specially.
2076   MarkSymbolTable();
2077 
2078   // There may be overflowed objects in the heap.  Visit them now.
2079   while (marking_deque_.overflowed()) {
2080     RefillMarkingDeque();
2081     EmptyMarkingDeque();
2082   }
2083 }
2084 
2085 
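     // Object groups are registered by the embedder: if any member of a group
     // is marked, every heap object in the group must be kept alive.  Groups
     // whose members are all unmarked are kept in the list for a later
     // iteration.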
2086 void MarkCompactCollector::MarkObjectGroups() {
2087   List<ObjectGroup*>* object_groups =
2088       heap()->isolate()->global_handles()->object_groups();
2089 
2090   int last = 0;
2091   for (int i = 0; i < object_groups->length(); i++) {
2092     ObjectGroup* entry = object_groups->at(i);
2093     ASSERT(entry != NULL);
2094 
2095     Object*** objects = entry->objects_;
2096     bool group_marked = false;
2097     for (size_t j = 0; j < entry->length_; j++) {
2098       Object* object = *objects[j];
2099       if (object->IsHeapObject()) {
2100         HeapObject* heap_object = HeapObject::cast(object);
2101         MarkBit mark = Marking::MarkBitFrom(heap_object);
2102         if (mark.Get()) {
2103           group_marked = true;
2104           break;
2105         }
2106       }
2107     }
2108 
2109     if (!group_marked) {
2110       (*object_groups)[last++] = entry;
2111       continue;
2112     }
2113 
2114     // An object in the group is marked, so mark as grey all white heap
2115     // objects in the group.
2116     for (size_t j = 0; j < entry->length_; ++j) {
2117       Object* object = *objects[j];
2118       if (object->IsHeapObject()) {
2119         HeapObject* heap_object = HeapObject::cast(object);
2120         MarkBit mark = Marking::MarkBitFrom(heap_object);
2121         MarkObject(heap_object, mark);
2122       }
2123     }
2124 
2125     // Once the entire group has been colored grey, set the object group
2126     // to NULL so it won't be processed again.
2127     entry->Dispose();
2128     object_groups->at(i) = NULL;
2129   }
2130   object_groups->Rewind(last);
2131 }
2132 
2133 
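     // Implicit ref groups are embedder-registered parent->children edges:
     // once the parent is marked, all of its child heap objects are marked
     // as well.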
2134 void MarkCompactCollector::MarkImplicitRefGroups() {
2135   List<ImplicitRefGroup*>* ref_groups =
2136       heap()->isolate()->global_handles()->implicit_ref_groups();
2137 
2138   int last = 0;
2139   for (int i = 0; i < ref_groups->length(); i++) {
2140     ImplicitRefGroup* entry = ref_groups->at(i);
2141     ASSERT(entry != NULL);
2142 
2143     if (!IsMarked(*entry->parent_)) {
2144       (*ref_groups)[last++] = entry;
2145       continue;
2146     }
2147 
2148     Object*** children = entry->children_;
2149     // A parent object is marked, so mark all child heap objects.
2150     for (size_t j = 0; j < entry->length_; ++j) {
2151       if ((*children[j])->IsHeapObject()) {
2152         HeapObject* child = HeapObject::cast(*children[j]);
2153         MarkBit mark = Marking::MarkBitFrom(child);
2154         MarkObject(child, mark);
2155       }
2156     }
2157 
2158     // Once the entire group has been marked, dispose it because it's
2159     // not needed anymore.
2160     entry->Dispose();
2161   }
2162   ref_groups->Rewind(last);
2163 }
2164 
2165 
2166 // Mark all objects reachable from the objects on the marking stack.
2167 // Before: the marking stack contains zero or more heap object pointers.
2168 // After: the marking stack is empty, and all objects reachable from the
2169 // marking stack have been marked, or are overflowed in the heap.
2170 void MarkCompactCollector::EmptyMarkingDeque() {
2171   while (!marking_deque_.IsEmpty()) {
2172     while (!marking_deque_.IsEmpty()) {
2173       HeapObject* object = marking_deque_.Pop();
2174       ASSERT(object->IsHeapObject());
2175       ASSERT(heap()->Contains(object));
2176       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2177 
2178       Map* map = object->map();
2179       MarkBit map_mark = Marking::MarkBitFrom(map);
2180       MarkObject(map, map_mark);
2181 
2182       StaticMarkingVisitor::IterateBody(map, object);
2183     }
2184 
2185     // Process encountered weak maps, mark objects only reachable by those
2186     // weak maps and repeat until fix-point is reached.
2187     ProcessWeakMaps();
2188   }
2189 }
2190 
2191 
2192 // Sweep the heap for overflowed objects, clear their overflow bits, and
2193 // push them on the marking stack.  Stop early if the marking stack fills
2194 // before sweeping completes.  If sweeping completes, there are no remaining
2195 // overflowed objects in the heap, so the overflow flag on the marking stack
2196 // is cleared.
2197 void MarkCompactCollector::RefillMarkingDeque() {
2198   ASSERT(marking_deque_.overflowed());
2199 
2200   SemiSpaceIterator new_it(heap()->new_space());
2201   DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
2202   if (marking_deque_.IsFull()) return;
2203 
2204   DiscoverGreyObjectsInSpace(heap(),
2205                              &marking_deque_,
2206                              heap()->old_pointer_space());
2207   if (marking_deque_.IsFull()) return;
2208 
2209   DiscoverGreyObjectsInSpace(heap(),
2210                              &marking_deque_,
2211                              heap()->old_data_space());
2212   if (marking_deque_.IsFull()) return;
2213 
2214   DiscoverGreyObjectsInSpace(heap(),
2215                              &marking_deque_,
2216                              heap()->code_space());
2217   if (marking_deque_.IsFull()) return;
2218 
2219   DiscoverGreyObjectsInSpace(heap(),
2220                              &marking_deque_,
2221                              heap()->map_space());
2222   if (marking_deque_.IsFull()) return;
2223 
2224   DiscoverGreyObjectsInSpace(heap(),
2225                              &marking_deque_,
2226                              heap()->cell_space());
2227   if (marking_deque_.IsFull()) return;
2228 
2229   LargeObjectIterator lo_it(heap()->lo_space());
2230   DiscoverGreyObjectsWithIterator(heap(),
2231                                   &marking_deque_,
2232                                   &lo_it);
2233   if (marking_deque_.IsFull()) return;
2234 
2235   marking_deque_.ClearOverflowed();
2236 }
2237 
2238 
2239 // Mark all objects reachable (transitively) from objects on the marking
2240 // stack.  Before: the marking stack contains zero or more heap object
2241 // pointers.  After: the marking stack is empty and there are no overflowed
2242 // objects in the heap.
2243 void MarkCompactCollector::ProcessMarkingDeque() {
2244   EmptyMarkingDeque();
2245   while (marking_deque_.overflowed()) {
2246     RefillMarkingDeque();
2247     EmptyMarkingDeque();
2248   }
2249 }
2250 
2251 
2252 void MarkCompactCollector::ProcessExternalMarking() {
2253   bool work_to_do = true;
2254   ASSERT(marking_deque_.IsEmpty());
2255   while (work_to_do) {
2256     MarkObjectGroups();
2257     MarkImplicitRefGroups();
2258     work_to_do = !marking_deque_.IsEmpty();
2259     ProcessMarkingDeque();
2260   }
2261 }
2262 
2263 
2264 void MarkCompactCollector::MarkLiveObjects() {
2265   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2266   // The recursive GC marker detects when it is nearing stack overflow,
2267   // and switches to a different marking system.  JS interrupts interfere
2268   // with the C stack limit check.
2269   PostponeInterruptsScope postpone(heap()->isolate());
2270 
2271   bool incremental_marking_overflowed = false;
2272   IncrementalMarking* incremental_marking = heap_->incremental_marking();
2273   if (was_marked_incrementally_) {
2274     // Finalize the incremental marking and check whether we had an overflow.
2275     // Both markers use the grey color to mark overflowed objects, so the
2276     // non-incremental marker can deal with them as if the overflow had
2277     // occurred during normal marking.  But the incremental marker uses a
2278     // separate marking deque, so we have to explicitly copy its overflow
2279     // state.
2280     incremental_marking->Finalize();
2281     incremental_marking_overflowed =
2282         incremental_marking->marking_deque()->overflowed();
2283     incremental_marking->marking_deque()->ClearOverflowed();
2284   } else {
2285     // Abort any pending incremental activities e.g. incremental sweeping.
2286     incremental_marking->Abort();
2287   }
2288 
2289 #ifdef DEBUG
2290   ASSERT(state_ == PREPARE_GC);
2291   state_ = MARK_LIVE_OBJECTS;
2292 #endif
2293   // The to space contains live objects; a page in the from space is used
2294   // as the marking stack.
2295   Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2296   Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2297   if (FLAG_force_marking_deque_overflows) {
2298     marking_deque_end = marking_deque_start + 64 * kPointerSize;
2299   }
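       // With the flag above, the deque is deliberately tiny (64 entries), so
       // the overflow handling path is exercised regularly in testing.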
2300   marking_deque_.Initialize(marking_deque_start,
2301                             marking_deque_end);
2302   ASSERT(!marking_deque_.overflowed());
2303 
2304   if (incremental_marking_overflowed) {
2305     // There are overflowed objects left in the heap after incremental marking.
2306     marking_deque_.SetOverflowed();
2307   }
2308 
2309   PrepareForCodeFlushing();
2310 
2311   if (was_marked_incrementally_) {
2312     // There is no write barrier on cells, so we have to scan them now, at
2313     // the end of incremental marking.
2314     {
2315       HeapObjectIterator cell_iterator(heap()->cell_space());
2316       HeapObject* cell;
2317       while ((cell = cell_iterator.Next()) != NULL) {
2318         ASSERT(cell->IsJSGlobalPropertyCell());
2319         if (IsMarked(cell)) {
2320           int offset = JSGlobalPropertyCell::kValueOffset;
2321           StaticMarkingVisitor::VisitPointer(
2322               heap(),
2323               reinterpret_cast<Object**>(cell->address() + offset));
2324         }
2325       }
2326     }
2327   }
2328 
2329   RootMarkingVisitor root_visitor(heap());
2330   MarkRoots(&root_visitor);
2331 
2332   // The objects reachable from the roots are marked, yet unreachable
2333   // objects are unmarked.  Mark objects reachable due to host
2334   // application specific logic.
2335   ProcessExternalMarking();
2336 
2337   // The objects reachable from the roots or object groups are marked,
2338   // yet unreachable objects are unmarked.  Mark objects reachable
2339   // only from weak global handles.
2340   //
2341   // First we identify nonlive weak handles and mark them as pending
2342   // destruction.
2343   heap()->isolate()->global_handles()->IdentifyWeakHandles(
2344       &IsUnmarkedHeapObject);
2345   // Then we mark the objects and process the transitive closure.
2346   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2347   while (marking_deque_.overflowed()) {
2348     RefillMarkingDeque();
2349     EmptyMarkingDeque();
2350   }
2351 
2352   // Repeat host application specific marking to mark unmarked objects
2353   // reachable from the weak roots.
2354   ProcessExternalMarking();
2355 
2356   AfterMarking();
2357 }
2358 
2359 
2360 void MarkCompactCollector::AfterMarking() {
2361   // Object literal map caches reference symbols (cache keys) and maps
2362   // (cache values). At this point, maps that are still useful have already
2363   // been marked. Mark the keys for the alive values before we process the
2364   // symbol table.
2365   ProcessMapCaches();
2366 
2367   // Prune the symbol table removing all symbols only pointed to by the
2368   // symbol table.  Cannot use symbol_table() here because the symbol
2369   // table is marked.
2370   SymbolTable* symbol_table = heap()->symbol_table();
2371   SymbolTableCleaner v(heap());
2372   symbol_table->IterateElements(&v);
2373   symbol_table->ElementsRemoved(v.PointersRemoved());
2374   heap()->external_string_table_.Iterate(&v);
2375   heap()->external_string_table_.CleanUp();
2376 
2377   // Process the weak references.
2378   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2379   heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2380 
2381   // Remove object groups after marking phase.
2382   heap()->isolate()->global_handles()->RemoveObjectGroups();
2383   heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2384 
2385   // Flush code from collected candidates.
2386   if (is_code_flushing_enabled()) {
2387     code_flusher_->ProcessCandidates();
2388   }
2389 
2390   if (!FLAG_watch_ic_patching) {
2391     // Clean up dead objects from the runtime profiler.
2392     heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2393   }
2394 }
2395 
2396 
2397 void MarkCompactCollector::ProcessMapCaches() {
2398   Object* raw_context = heap()->global_contexts_list_;
2399   while (raw_context != heap()->undefined_value()) {
2400     Context* context = reinterpret_cast<Context*>(raw_context);
2401     if (IsMarked(context)) {
2402       HeapObject* raw_map_cache =
2403           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2404       // A map cache may be reachable from the stack. In this case
2405       // it's already transitively marked and it's too late to clean
2406       // up its parts.
2407       if (!IsMarked(raw_map_cache) &&
2408           raw_map_cache != heap()->undefined_value()) {
2409         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2410         int existing_elements = map_cache->NumberOfElements();
2411         int used_elements = 0;
2412         for (int i = MapCache::kElementsStartIndex;
2413              i < map_cache->length();
2414              i += MapCache::kEntrySize) {
2415           Object* raw_key = map_cache->get(i);
2416           if (raw_key == heap()->undefined_value() ||
2417               raw_key == heap()->the_hole_value()) continue;
2418           STATIC_ASSERT(MapCache::kEntrySize == 2);
2419           Object* raw_map = map_cache->get(i + 1);
2420           if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2421             ++used_elements;
2422           } else {
2423             // Delete useless entries with unmarked maps.
2424             ASSERT(raw_map->IsMap());
2425             map_cache->set_the_hole(i);
2426             map_cache->set_the_hole(i + 1);
2427           }
2428         }
2429         if (used_elements == 0) {
2430           context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2431         } else {
2432           // Note: we don't actually shrink the cache here to avoid
2433           // extra complexity during GC. We rely on subsequent cache
2434           // usages (EnsureCapacity) to do this.
2435           map_cache->ElementsRemoved(existing_elements - used_elements);
2436           MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2437           MarkObject(map_cache, map_cache_markbit);
2438         }
2439       }
2440     }
2441     // Move to next element in the list.
2442     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2443   }
2444   ProcessMarkingDeque();
2445 }
2446 
2447 
2448 void MarkCompactCollector::ReattachInitialMaps() {
2449   HeapObjectIterator map_iterator(heap()->map_space());
2450   for (HeapObject* obj = map_iterator.Next();
2451        obj != NULL;
2452        obj = map_iterator.Next()) {
2453     if (obj->IsFreeSpace()) continue;
2454     Map* map = Map::cast(obj);
2455 
2456     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2457     if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2458 
2459     if (map->attached_to_shared_function_info()) {
2460       JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2461     }
2462   }
2463 }
2464 
2465 
2466 void MarkCompactCollector::ClearNonLiveTransitions() {
2467   HeapObjectIterator map_iterator(heap()->map_space());
2468   // Iterate over the map space, setting map transitions that go from
2469   // a marked map to an unmarked map to null transitions.  At the same time,
2470   // set all the prototype fields of maps back to their original value,
2471   // dropping the back pointers temporarily stored in the prototype field.
2472   // Setting the prototype field requires following the linked list of
2473   // back pointers, reversing them all at once.  This allows us to find
2474   // those maps with map transitions that need to be nulled, and only
2475   // scan the descriptor arrays of those maps, not all maps.
2476   // All of these actions are carried out only on maps of JSObjects
2477   // and related subtypes.
2478   for (HeapObject* obj = map_iterator.Next();
2479        obj != NULL; obj = map_iterator.Next()) {
2480     Map* map = reinterpret_cast<Map*>(obj);
2481     MarkBit map_mark = Marking::MarkBitFrom(map);
2482     if (map->IsFreeSpace()) continue;
2483 
2484     ASSERT(map->IsMap());
2485     // Only JSObject and subtypes have map transitions and back pointers.
2486     STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
2487     if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
2488 
2489     if (map_mark.Get() &&
2490         map->attached_to_shared_function_info()) {
2491       // This map is used for inobject slack tracking and has been detached
2492       // from SharedFunctionInfo during the mark phase.
2493       // Since it survived the GC, reattach it now.
2494       map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
2495     }
2496 
2497     ClearNonLivePrototypeTransitions(map);
2498     ClearNonLiveMapTransitions(map, map_mark);
2499   }
2500 }
2501 
2502 
2503 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2504   int number_of_transitions = map->NumberOfProtoTransitions();
2505   FixedArray* prototype_transitions = map->prototype_transitions();
2506 
2507   int new_number_of_transitions = 0;
2508   const int header = Map::kProtoTransitionHeaderSize;
2509   const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2510   const int map_offset = header + Map::kProtoTransitionMapOffset;
2511   const int step = Map::kProtoTransitionElementsPerEntry;
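       // Each entry holds a (prototype, cached map) pair.  Entries whose
       // prototype and map are both still live are compacted towards the
       // front of the array; the freed tail is cleared to undefined below.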
2512   for (int i = 0; i < number_of_transitions; i++) {
2513     Object* prototype = prototype_transitions->get(proto_offset + i * step);
2514     Object* cached_map = prototype_transitions->get(map_offset + i * step);
2515     if (IsMarked(prototype) && IsMarked(cached_map)) {
2516       int proto_index = proto_offset + new_number_of_transitions * step;
2517       int map_index = map_offset + new_number_of_transitions * step;
2518       if (new_number_of_transitions != i) {
2519         prototype_transitions->set_unchecked(
2520             heap_,
2521             proto_index,
2522             prototype,
2523             UPDATE_WRITE_BARRIER);
2524         prototype_transitions->set_unchecked(
2525             heap_,
2526             map_index,
2527             cached_map,
2528             SKIP_WRITE_BARRIER);
2529       }
2530       Object** slot =
2531           HeapObject::RawField(prototype_transitions,
2532                                FixedArray::OffsetOfElementAt(proto_index));
2533       RecordSlot(slot, slot, prototype);
2534       new_number_of_transitions++;
2535     }
2536   }
2537 
2538   if (new_number_of_transitions != number_of_transitions) {
2539     map->SetNumberOfProtoTransitions(new_number_of_transitions);
2540   }
2541 
2542   // Fill slots that became free with undefined value.
2543   for (int i = new_number_of_transitions * step;
2544        i < number_of_transitions * step;
2545        i++) {
2546     prototype_transitions->set_undefined(heap_, header + i);
2547   }
2548 }
2549 
2550 
2551 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2552                                                       MarkBit map_mark) {
2553   // Follow the chain of back pointers to find the prototype.
2554   Object* real_prototype = map;
2555   while (real_prototype->IsMap()) {
2556     real_prototype = Map::cast(real_prototype)->prototype();
2557     ASSERT(real_prototype->IsHeapObject());
2558   }
2559 
2560   // Follow back pointers, setting them to prototype, clearing map transitions
2561   // when necessary.
2562   Map* current = map;
2563   bool current_is_alive = map_mark.Get();
2564   bool on_dead_path = !current_is_alive;
2565   while (current->IsMap()) {
2566     Object* next = current->prototype();
2567     // There should never be a dead map above a live map.
2568     ASSERT(on_dead_path || current_is_alive);
2569 
2570     // A live map above a dead map indicates a dead transition. This test will
2571     // always be false on the first iteration.
2572     if (on_dead_path && current_is_alive) {
2573       on_dead_path = false;
2574       current->ClearNonLiveTransitions(heap(), real_prototype);
2575     }
2576 
2577     Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
2578     *slot = real_prototype;
2579     if (current_is_alive) RecordSlot(slot, slot, real_prototype);
2580 
2581     current = reinterpret_cast<Map*>(next);
2582     current_is_alive = Marking::MarkBitFrom(current).Get();
2583   }
2584 }
2585 
2586 
2587 void MarkCompactCollector::ProcessWeakMaps() {
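       // Ephemeron-style processing: for each weak map found during marking,
       // mark the value of every entry whose key is already marked.
       // EmptyMarkingDeque re-runs this until a fixpoint is reached, so values
       // reachable only through chains of weak map entries are found as well.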
2588   Object* weak_map_obj = encountered_weak_maps();
2589   while (weak_map_obj != Smi::FromInt(0)) {
2590     ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2591     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2592     ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2593     for (int i = 0; i < table->Capacity(); i++) {
2594       if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2595         Object* value = table->get(table->EntryToValueIndex(i));
2596         StaticMarkingVisitor::VisitPointer(heap(), &value);
2597         table->set_unchecked(heap(),
2598                              table->EntryToValueIndex(i),
2599                              value,
2600                              UPDATE_WRITE_BARRIER);
2601       }
2602     }
2603     weak_map_obj = weak_map->next();
2604   }
2605 }
2606 
2607 
2608 void MarkCompactCollector::ClearWeakMaps() {
2609   Object* weak_map_obj = encountered_weak_maps();
2610   while (weak_map_obj != Smi::FromInt(0)) {
2611     ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2612     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2613     ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2614     for (int i = 0; i < table->Capacity(); i++) {
2615       if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2616         table->RemoveEntry(i);
2617       }
2618     }
2619     weak_map_obj = weak_map->next();
2620     weak_map->set_next(Smi::FromInt(0));
2621   }
2622   set_encountered_weak_maps(Smi::FromInt(0));
2623 }
2624 
2625 
2626 // We scavenge new space simultaneously with sweeping.  This is done in two
2627 // passes.
2628 //
2629 // The first pass migrates all alive objects from one semispace to another or
2630 // promotes them to old space.  The forwarding address is written directly
2631 // into the first word of the object without any encoding.  If the object is
2632 // dead, we write NULL as the forwarding address.
2633 //
2634 // The second pass updates pointers to new space in all spaces.  It is possible
2635 // to encounter pointers to dead new space objects during traversal of pointers
2636 // to new space.  We should clear them to avoid encountering them during the
2637 // next pointer iteration.  This is an issue if the store buffer overflows and
2638 // we have to scan the entire old space, including dead objects, looking for
2639 // pointers to new space.
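     //
     // For example, when a three-word object migrates to old pointer space,
     // each word is copied individually below: a word holding a new-space
     // pointer gets its destination slot marked in the store buffer, and a
     // word pointing into an evacuation candidate is recorded in the
     // migration slots buffer.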
2640 void MarkCompactCollector::MigrateObject(Address dst,
2641                                          Address src,
2642                                          int size,
2643                                          AllocationSpace dest) {
2644   HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2645   if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2646     Address src_slot = src;
2647     Address dst_slot = dst;
2648     ASSERT(IsAligned(size, kPointerSize));
2649 
2650     for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2651       Object* value = Memory::Object_at(src_slot);
2652 
2653       Memory::Object_at(dst_slot) = value;
2654 
2655       if (heap_->InNewSpace(value)) {
2656         heap_->store_buffer()->Mark(dst_slot);
2657       } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2658         SlotsBuffer::AddTo(&slots_buffer_allocator_,
2659                            &migration_slots_buffer_,
2660                            reinterpret_cast<Object**>(dst_slot),
2661                            SlotsBuffer::IGNORE_OVERFLOW);
2662       }
2663 
2664       src_slot += kPointerSize;
2665       dst_slot += kPointerSize;
2666     }
2667 
2668     if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2669       Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2670       Address code_entry = Memory::Address_at(code_entry_slot);
2671 
2672       if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2673         SlotsBuffer::AddTo(&slots_buffer_allocator_,
2674                            &migration_slots_buffer_,
2675                            SlotsBuffer::CODE_ENTRY_SLOT,
2676                            code_entry_slot,
2677                            SlotsBuffer::IGNORE_OVERFLOW);
2678       }
2679     }
2680   } else if (dest == CODE_SPACE) {
2681     PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2682     heap()->MoveBlock(dst, src, size);
2683     SlotsBuffer::AddTo(&slots_buffer_allocator_,
2684                        &migration_slots_buffer_,
2685                        SlotsBuffer::RELOCATED_CODE_OBJECT,
2686                        dst,
2687                        SlotsBuffer::IGNORE_OVERFLOW);
2688     Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2689   } else {
2690     ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2691     heap()->MoveBlock(dst, src, size);
2692   }
2693   Memory::Address_at(src) = dst;
2694 }
2695 
2696 
2697 // Visitor for updating pointers from live objects in old spaces to new space.
2698 // It does not expect to encounter pointers to dead objects.
2699 class PointersUpdatingVisitor: public ObjectVisitor {
2700  public:
2701   explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2702 
2703   void VisitPointer(Object** p) {
2704     UpdatePointer(p);
2705   }
2706 
2707   void VisitPointers(Object** start, Object** end) {
2708     for (Object** p = start; p < end; p++) UpdatePointer(p);
2709   }
2710 
2711   void VisitEmbeddedPointer(RelocInfo* rinfo) {
2712     ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2713     Object* target = rinfo->target_object();
2714     VisitPointer(&target);
2715     rinfo->set_target_object(target);
2716   }
2717 
2718   void VisitCodeTarget(RelocInfo* rinfo) {
2719     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2720     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2721     VisitPointer(&target);
2722     rinfo->set_target_address(Code::cast(target)->instruction_start());
2723   }
2724 
2725   void VisitDebugTarget(RelocInfo* rinfo) {
2726     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2727             rinfo->IsPatchedReturnSequence()) ||
2728            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2729             rinfo->IsPatchedDebugBreakSlotSequence()));
2730     Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2731     VisitPointer(&target);
2732     rinfo->set_call_address(Code::cast(target)->instruction_start());
2733   }
2734 
2735   static inline void UpdateSlot(Heap* heap, Object** slot) {
2736     Object* obj = *slot;
2737 
2738     if (!obj->IsHeapObject()) return;
2739 
2740     HeapObject* heap_obj = HeapObject::cast(obj);
2741 
2742     MapWord map_word = heap_obj->map_word();
2743     if (map_word.IsForwardingAddress()) {
2744       ASSERT(heap->InFromSpace(heap_obj) ||
2745              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2746       HeapObject* target = map_word.ToForwardingAddress();
2747       *slot = target;
2748       ASSERT(!heap->InFromSpace(target) &&
2749              !MarkCompactCollector::IsOnEvacuationCandidate(target));
2750     }
2751   }
2752 
2753  private:
2754   inline void UpdatePointer(Object** p) {
2755     UpdateSlot(heap_, p);
2756   }
2757 
2758   Heap* heap_;
2759 };
2760 
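// A hedged sketch of the mechanism UpdateSlot relies on: MigrateObject
// (above) writes an object's new address into the first word of its old
// shell, so the stale map word decodes as a forwarding address.
//
//   Object** slot = ...;  // still points at the object's old location
//   PointersUpdatingVisitor::UpdateSlot(heap, slot);
//   // *slot now holds the forwarding address, i.e. the new location;
//   // slots whose targets were not moved are left untouched.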
2761 
2762 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2763   ASSERT(*p == object);
2764 
2765   Address old_addr = object->address();
2766 
2767   Address new_addr = Memory::Address_at(old_addr);
2768 
2769   // The new space sweep will overwrite the map word of dead objects
2770   // with NULL. In this case we do not need to transfer this entry to
2771   // the store buffer which we are rebuilding.
2772   if (new_addr != NULL) {
2773     *p = HeapObject::FromAddress(new_addr);
2774   } else {
2775     // We have to zap this pointer, because the store buffer may overflow later,
2776     // and then we have to scan the entire heap and we don't want to find
2777     // spurious newspace pointers in the old space.
2778     *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
2779   }
2780 }
2781 
2782 
2783 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2784                                                          Object** p) {
2785   MapWord map_word = HeapObject::cast(*p)->map_word();
2786 
2787   if (map_word.IsForwardingAddress()) {
2788     return String::cast(map_word.ToForwardingAddress());
2789   }
2790 
2791   return String::cast(*p);
2792 }
2793 
2794 
2795 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2796                                             int object_size) {
2797   Object* result;
2798 
2799   if (object_size > Page::kMaxNonCodeHeapObjectSize) {
2800     MaybeObject* maybe_result =
2801         heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
2802     if (maybe_result->ToObject(&result)) {
2803       HeapObject* target = HeapObject::cast(result);
2804       MigrateObject(target->address(),
2805                     object->address(),
2806                     object_size,
2807                     LO_SPACE);
2808       heap()->mark_compact_collector()->tracer()->
2809           increment_promoted_objects_size(object_size);
2810       return true;
2811     }
2812   } else {
2813     OldSpace* target_space = heap()->TargetSpace(object);
2814 
2815     ASSERT(target_space == heap()->old_pointer_space() ||
2816            target_space == heap()->old_data_space());
2817     MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2818     if (maybe_result->ToObject(&result)) {
2819       HeapObject* target = HeapObject::cast(result);
2820       MigrateObject(target->address(),
2821                     object->address(),
2822                     object_size,
2823                     target_space->identity());
2824       heap()->mark_compact_collector()->tracer()->
2825           increment_promoted_objects_size(object_size);
2826       return true;
2827     }
2828   }
2829 
2830   return false;
2831 }
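// In short: objects larger than Page::kMaxNonCodeHeapObjectSize can only be
// promoted into large-object space; everything else goes into the old
// pointer or old data space chosen by Heap::TargetSpace.  A false return is
// not fatal: the caller (EvacuateNewSpace below) falls back to copying the
// object within new space.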
2832 
2833 
2834 void MarkCompactCollector::EvacuateNewSpace() {
2835   // There are soft limits in the allocation code, designed to trigger a mark
2836   // sweep collection by failing allocations.  But since we are already in
2837   // a mark-sweep allocation, there is no sense in trying to trigger one.
2838   AlwaysAllocateScope scope;
2839   heap()->CheckNewSpaceExpansionCriteria();
2840 
2841   NewSpace* new_space = heap()->new_space();
2842 
2843   // Store allocation range before flipping semispaces.
2844   Address from_bottom = new_space->bottom();
2845   Address from_top = new_space->top();
2846 
2847   // Flip the semispaces.  After flipping, to space is empty, from space has
2848   // live objects.
2849   new_space->Flip();
2850   new_space->ResetAllocationInfo();
2851 
2852   int survivors_size = 0;
2853 
2854   // First pass: traverse all objects in inactive semispace, remove marks,
2855   // migrate live objects and write forwarding addresses.  This stage puts
2856   // new entries in the store buffer and may cause some pages to be marked
2857   // scan-on-scavenge.
2858   SemiSpaceIterator from_it(from_bottom, from_top);
2859   for (HeapObject* object = from_it.Next();
2860        object != NULL;
2861        object = from_it.Next()) {
2862     MarkBit mark_bit = Marking::MarkBitFrom(object);
2863     if (mark_bit.Get()) {
2864       mark_bit.Clear();
2865       // Don't bother decrementing live bytes count. We'll discard the
2866       // entire page at the end.
2867       int size = object->Size();
2868       survivors_size += size;
2869 
2870       // Aggressively promote young survivors to the old space.
2871       if (TryPromoteObject(object, size)) {
2872         continue;
2873       }
2874 
2875       // Promotion failed. Just migrate object to another semispace.
2876       MaybeObject* allocation = new_space->AllocateRaw(size);
2877       if (allocation->IsFailure()) {
2878         if (!new_space->AddFreshPage()) {
2879           // Shouldn't happen. We are sweeping linearly, and to-space
2880           // has the same number of pages as from-space, so there is
2881           // always room.
2882           UNREACHABLE();
2883         }
2884         allocation = new_space->AllocateRaw(size);
2885         ASSERT(!allocation->IsFailure());
2886       }
2887       Object* target = allocation->ToObjectUnchecked();
2888 
2889       MigrateObject(HeapObject::cast(target)->address(),
2890                     object->address(),
2891                     size,
2892                     NEW_SPACE);
2893     } else {
2894       // Process the dead object before we write a NULL into its header.
2895       LiveObjectList::ProcessNonLive(object);
2896 
2897       // Mark dead objects in the new space with null in their map field.
2898       Memory::Address_at(object->address()) = NULL;
2899     }
2900   }
2901 
2902   heap_->IncrementYoungSurvivorsCounter(survivors_size);
2903   new_space->set_age_mark(new_space->top());
2904 }
2905 
2906 
2907 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2908   AlwaysAllocateScope always_allocate;
2909   PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2910   ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2911   MarkBit::CellType* cells = p->markbits()->cells();
2912   p->MarkSweptPrecisely();
2913 
2914   int last_cell_index =
2915       Bitmap::IndexToCell(
2916           Bitmap::CellAlignIndex(
2917               p->AddressToMarkbitIndex(p->area_end())));
2918 
2919   Address cell_base = p->area_start();
2920   int cell_index = Bitmap::IndexToCell(
2921           Bitmap::CellAlignIndex(
2922               p->AddressToMarkbitIndex(cell_base)));
2923 
2924   int offsets[16];
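  // Note: 16 entries suffice because objects are at least two words long,
  // so a 32-bit mark cell can encode at most 16 object starts.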
2925 
2926   for (;
2927        cell_index < last_cell_index;
2928        cell_index++, cell_base += 32 * kPointerSize) {
2929     ASSERT((unsigned)cell_index ==
2930         Bitmap::IndexToCell(
2931             Bitmap::CellAlignIndex(
2932                 p->AddressToMarkbitIndex(cell_base))));
2933     if (cells[cell_index] == 0) continue;
2934 
2935     int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2936     for (int i = 0; i < live_objects; i++) {
2937       Address object_addr = cell_base + offsets[i] * kPointerSize;
2938       HeapObject* object = HeapObject::FromAddress(object_addr);
2939       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2940 
2941       int size = object->Size();
2942 
2943       MaybeObject* target = space->AllocateRaw(size);
2944       if (target->IsFailure()) {
2945         // OS refused to give us memory.
2946         V8::FatalProcessOutOfMemory("Evacuation");
2947         return;
2948       }
2949 
2950       Object* target_object = target->ToObjectUnchecked();
2951 
2952       MigrateObject(HeapObject::cast(target_object)->address(),
2953                     object_addr,
2954                     size,
2955                     space->identity());
2956       ASSERT(object->map_word().IsForwardingAddress());
2957     }
2958 
2959     // Clear marking bits for current cell.
2960     cells[cell_index] = 0;
2961   }
2962   p->ResetLiveBytes();
2963 }
2964 
2965 
2966 void MarkCompactCollector::EvacuatePages() {
2967   int npages = evacuation_candidates_.length();
2968   for (int i = 0; i < npages; i++) {
2969     Page* p = evacuation_candidates_[i];
2970     ASSERT(p->IsEvacuationCandidate() ||
2971            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2972     if (p->IsEvacuationCandidate()) {
2973       // During compaction we might have to request a new page.
2974       // Check that the space still has room for that.
2975       if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
2976         EvacuateLiveObjectsFromPage(p);
2977       } else {
2978         // Without room for expansion evacuation is not guaranteed to succeed.
2979         // Pessimistically abandon unevacuated pages.
2980         for (int j = i; j < npages; j++) {
2981           Page* page = evacuation_candidates_[j];
2982           slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
2983           page->ClearEvacuationCandidate();
2984           page->SetFlag(Page::RESCAN_ON_EVACUATION);
2985         }
2986         return;
2987       }
2988     }
2989   }
2990 }
2991 
2992 
2993 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2994  public:
2995   virtual Object* RetainAs(Object* object) {
2996     if (object->IsHeapObject()) {
2997       HeapObject* heap_object = HeapObject::cast(object);
2998       MapWord map_word = heap_object->map_word();
2999       if (map_word.IsForwardingAddress()) {
3000         return map_word.ToForwardingAddress();
3001       }
3002     }
3003     return object;
3004   }
3005 };
3006 
3007 
3008 static inline void UpdateSlot(ObjectVisitor* v,
3009                               SlotsBuffer::SlotType slot_type,
3010                               Address addr) {
3011   switch (slot_type) {
3012     case SlotsBuffer::CODE_TARGET_SLOT: {
3013       RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3014       rinfo.Visit(v);
3015       break;
3016     }
3017     case SlotsBuffer::CODE_ENTRY_SLOT: {
3018       v->VisitCodeEntry(addr);
3019       break;
3020     }
3021     case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3022       HeapObject* obj = HeapObject::FromAddress(addr);
3023       Code::cast(obj)->CodeIterateBody(v);
3024       break;
3025     }
3026     case SlotsBuffer::DEBUG_TARGET_SLOT: {
3027       RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3028       if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
3029       break;
3030     }
3031     case SlotsBuffer::JS_RETURN_SLOT: {
3032       RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3033       if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
3034       break;
3035     }
3036     case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3037       RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3038       rinfo.Visit(v);
3039       break;
3040     }
3041     default:
3042       UNREACHABLE();
3043       break;
3044   }
3045 }
3046 
3047 
3048 enum SweepingMode {
3049   SWEEP_ONLY,
3050   SWEEP_AND_VISIT_LIVE_OBJECTS
3051 };
3052 
3053 
3054 enum SkipListRebuildingMode {
3055   REBUILD_SKIP_LIST,
3056   IGNORE_SKIP_LIST
3057 };
3058 
3059 
3060 // Sweep a space precisely.  After this has been done the space can
3061 // be iterated precisely, hitting only the live objects.  Code space
3062 // is always swept precisely because we want to be able to iterate
3063 // over it.  Map space is swept precisely, because it is not compacted.
3064 // Slots in live objects pointing into evacuation candidates are updated
3065 // if requested.
3066 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3067 static void SweepPrecisely(PagedSpace* space,
3068                            Page* p,
3069                            ObjectVisitor* v) {
3070   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3071   ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3072             space->identity() == CODE_SPACE);
3073   ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3074 
3075   MarkBit::CellType* cells = p->markbits()->cells();
3076   p->MarkSweptPrecisely();
3077 
3078   int last_cell_index =
3079       Bitmap::IndexToCell(
3080           Bitmap::CellAlignIndex(
3081               p->AddressToMarkbitIndex(p->area_end())));
3082 
3083   Address free_start = p->area_start();
3084   int cell_index =
3085       Bitmap::IndexToCell(
3086           Bitmap::CellAlignIndex(
3087               p->AddressToMarkbitIndex(free_start)));
3088 
3089   ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3090   Address object_address = free_start;
3091   int offsets[16];
3092 
3093   SkipList* skip_list = p->skip_list();
3094   int curr_region = -1;
3095   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3096     skip_list->Clear();
3097   }
3098 
3099   for (;
3100        cell_index < last_cell_index;
3101        cell_index++, object_address += 32 * kPointerSize) {
3102     ASSERT((unsigned)cell_index ==
3103         Bitmap::IndexToCell(
3104             Bitmap::CellAlignIndex(
3105                 p->AddressToMarkbitIndex(object_address))));
3106     int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
3107     int live_index = 0;
3108     for ( ; live_objects != 0; live_objects--) {
3109       Address free_end = object_address + offsets[live_index++] * kPointerSize;
3110       if (free_end != free_start) {
3111         space->Free(free_start, static_cast<int>(free_end - free_start));
3112       }
3113       HeapObject* live_object = HeapObject::FromAddress(free_end);
3114       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3115       Map* map = live_object->map();
3116       int size = live_object->SizeFromMap(map);
3117       if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3118         live_object->IterateBody(map->instance_type(), size, v);
3119       }
3120       if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3121         int new_region_start =
3122             SkipList::RegionNumber(free_end);
3123         int new_region_end =
3124             SkipList::RegionNumber(free_end + size - kPointerSize);
3125         if (new_region_start != curr_region ||
3126             new_region_end != curr_region) {
3127           skip_list->AddObject(free_end, size);
3128           curr_region = new_region_end;
3129         }
3130       }
3131       free_start = free_end + size;
3132     }
3133     // Clear marking bits for current cell.
3134     cells[cell_index] = 0;
3135   }
3136   if (free_start != p->area_end()) {
3137     space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3138   }
3139   p->ResetLiveBytes();
3140 }
3141 
3142 
3143 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3144   Page* p = Page::FromAddress(code->address());
3145 
3146   if (p->IsEvacuationCandidate() ||
3147       p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3148     return false;
3149   }
3150 
3151   Address code_start = code->address();
3152   Address code_end = code_start + code->Size();
3153 
3154   uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3155   uint32_t end_index =
3156       MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3157 
3158   Bitmap* b = p->markbits();
3159 
3160   MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3161   MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3162 
3163   MarkBit::CellType* start_cell = start_mark_bit.cell();
3164   MarkBit::CellType* end_cell = end_mark_bit.cell();
3165 
3166   if (value) {
3167     MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3168     MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
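    // E.g. (illustration) with the start bit at bit 3 and the end bit at
    // bit 5 of the same cell: start_mask == ...11111000 and
    // end_mask == 0b111111, so start_mask & end_mask sets exactly bits 3-5.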
3169 
3170     if (start_cell == end_cell) {
3171       *start_cell |= start_mask & end_mask;
3172     } else {
3173       *start_cell |= start_mask;
3174       for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3175         *cell = ~0;
3176       }
3177       *end_cell |= end_mask;
3178     }
3179   } else {
3180     for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3181       *cell = 0;
3182     }
3183   }
3184 
3185   return true;
3186 }
3187 
3188 
3189 static bool IsOnInvalidatedCodeObject(Address addr) {
3190   // We did not record any slots in large objects, thus
3191   // we can safely go to the page from the slot address.
3192   Page* p = Page::FromAddress(addr);
3193 
3194   // First check owner's identity because old pointer and old data spaces
3195   // are swept lazily and might still have non-zero mark-bits on some
3196   // pages.
3197   if (p->owner()->identity() != CODE_SPACE) return false;
3198 
3199   // In code space the only non-zero mark bits are on evacuation candidates
3200   // (though we record no slots on them) and under invalidated code objects.
3201   MarkBit mark_bit =
3202       p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3203 
3204   return mark_bit.Get();
3205 }
3206 
3207 
3208 void MarkCompactCollector::InvalidateCode(Code* code) {
3209   if (heap_->incremental_marking()->IsCompacting() &&
3210       !ShouldSkipEvacuationSlotRecording(code)) {
3211     ASSERT(compacting_);
3212 
3213     // If the object is white then no slots were recorded on it yet.
3214     MarkBit mark_bit = Marking::MarkBitFrom(code);
3215     if (Marking::IsWhite(mark_bit)) return;
3216 
3217     invalidated_code_.Add(code);
3218   }
3219 }
3220 
3221 
3222 bool MarkCompactCollector::MarkInvalidatedCode() {
3223   bool code_marked = false;
3224 
3225   int length = invalidated_code_.length();
3226   for (int i = 0; i < length; i++) {
3227     Code* code = invalidated_code_[i];
3228 
3229     if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3230       code_marked = true;
3231     }
3232   }
3233 
3234   return code_marked;
3235 }
3236 
3237 
3238 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3239   int length = invalidated_code_.length();
3240   for (int i = 0; i < length; i++) {
3241     if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3242   }
3243 }
3244 
3245 
3246 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3247   int length = invalidated_code_.length();
3248   for (int i = 0; i < length; i++) {
3249     Code* code = invalidated_code_[i];
3250     if (code != NULL) {
3251       code->Iterate(visitor);
3252       SetMarkBitsUnderInvalidatedCode(code, false);
3253     }
3254   }
3255   invalidated_code_.Rewind(0);
3256 }
3257 
3258 
3259 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3260   bool code_slots_filtering_required;
3261   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3262     code_slots_filtering_required = MarkInvalidatedCode();
3263 
3264     EvacuateNewSpace();
3265   }
3266 
3267 
3268   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3269     EvacuatePages();
3270   }
3271 
3272   // Second pass: find pointers to new space and update them.
3273   PointersUpdatingVisitor updating_visitor(heap());
3274 
3275   { GCTracer::Scope gc_scope(tracer_,
3276                              GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3277     // Update pointers in to space.
3278     SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3279                             heap()->new_space()->top());
3280     for (HeapObject* object = to_it.Next();
3281          object != NULL;
3282          object = to_it.Next()) {
3283       Map* map = object->map();
3284       object->IterateBody(map->instance_type(),
3285                           object->SizeFromMap(map),
3286                           &updating_visitor);
3287     }
3288   }
3289 
3290   { GCTracer::Scope gc_scope(tracer_,
3291                              GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3292     // Update roots.
3293     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3294     LiveObjectList::IterateElements(&updating_visitor);
3295   }
3296 
3297   { GCTracer::Scope gc_scope(tracer_,
3298                              GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3299     StoreBufferRebuildScope scope(heap_,
3300                                   heap_->store_buffer(),
3301                                   &Heap::ScavengeStoreBufferCallback);
3302     heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3303   }
3304 
3305   { GCTracer::Scope gc_scope(tracer_,
3306                              GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3307     SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3308                                        migration_slots_buffer_,
3309                                        code_slots_filtering_required);
3310     if (FLAG_trace_fragmentation) {
3311       PrintF("  migration slots buffer: %d\n",
3312              SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3313     }
3314 
3315     if (compacting_ && was_marked_incrementally_) {
3316       // It's difficult to filter out slots recorded for large objects.
3317       LargeObjectIterator it(heap_->lo_space());
3318       for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3319         // LargeObjectSpace is not swept yet thus we have to skip
3320         // dead objects explicitly.
3321         if (!IsMarked(obj)) continue;
3322 
3323         Page* p = Page::FromAddress(obj->address());
3324         if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3325           obj->Iterate(&updating_visitor);
3326           p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3327         }
3328       }
3329     }
3330   }
3331 
3332   int npages = evacuation_candidates_.length();
3333   { GCTracer::Scope gc_scope(
3334       tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3335     for (int i = 0; i < npages; i++) {
3336       Page* p = evacuation_candidates_[i];
3337       ASSERT(p->IsEvacuationCandidate() ||
3338              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3339 
3340       if (p->IsEvacuationCandidate()) {
3341         SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3342                                            p->slots_buffer(),
3343                                            code_slots_filtering_required);
3344         if (FLAG_trace_fragmentation) {
3345           PrintF("  page %p slots buffer: %d\n",
3346                  reinterpret_cast<void*>(p),
3347                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
3348         }
3349 
3350         // Important: the skip list must be cleared only after the roots are
3351         // updated, because root iteration walks the stack and may need to find
3352         // code objects from a stale pc that points into an evacuation candidate.
3353         SkipList* list = p->skip_list();
3354         if (list != NULL) list->Clear();
3355       } else {
3356         if (FLAG_gc_verbose) {
3357           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3358                  reinterpret_cast<intptr_t>(p));
3359         }
3360         PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3361         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3362 
3363         switch (space->identity()) {
3364           case OLD_DATA_SPACE:
3365             SweepConservatively(space, p);
3366             break;
3367           case OLD_POINTER_SPACE:
3368             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3369                 space, p, &updating_visitor);
3370             break;
3371           case CODE_SPACE:
3372             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3373                 space, p, &updating_visitor);
3374             break;
3375           default:
3376             UNREACHABLE();
3377             break;
3378         }
3379       }
3380     }
3381   }
3382 
3383   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3384 
3385   // Update pointers from cells.
3386   HeapObjectIterator cell_iterator(heap_->cell_space());
3387   for (HeapObject* cell = cell_iterator.Next();
3388        cell != NULL;
3389        cell = cell_iterator.Next()) {
3390     if (cell->IsJSGlobalPropertyCell()) {
3391       Address value_address =
3392           reinterpret_cast<Address>(cell) +
3393           (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
3394       updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
3395     }
3396   }
3397 
3398   // Update pointer from the global contexts list.
3399   updating_visitor.VisitPointer(heap_->global_contexts_list_address());
3400 
3401   heap_->symbol_table()->Iterate(&updating_visitor);
3402 
3403   // Update pointers from external string table.
3404   heap_->UpdateReferencesInExternalStringTable(
3405       &UpdateReferenceInExternalStringTableEntry);
3406 
3407   if (!FLAG_watch_ic_patching) {
3408     // Update JSFunction pointers from the runtime profiler.
3409     heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3410         &updating_visitor);
3411   }
3412 
3413   EvacuationWeakObjectRetainer evacuation_object_retainer;
3414   heap()->ProcessWeakReferences(&evacuation_object_retainer);
3415 
3416   // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3417   // under it.
3418   ProcessInvalidatedCode(&updating_visitor);
3419 
3420 #ifdef DEBUG
3421   if (FLAG_verify_heap) {
3422     VerifyEvacuation(heap_);
3423   }
3424 #endif
3425 
3426   slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3427   ASSERT(migration_slots_buffer_ == NULL);
3428   for (int i = 0; i < npages; i++) {
3429     Page* p = evacuation_candidates_[i];
3430     if (!p->IsEvacuationCandidate()) continue;
3431     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3432     space->Free(p->area_start(), p->area_size());
3433     p->set_scan_on_scavenge(false);
3434     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3435     p->ResetLiveBytes();
3436     space->ReleasePage(p);
3437   }
3438   evacuation_candidates_.Rewind(0);
3439   compacting_ = false;
3440 }
3441 
3442 
3443 static const int kStartTableEntriesPerLine = 5;
3444 static const int kStartTableLines = 171;
3445 static const int kStartTableInvalidLine = 127;
3446 static const int kStartTableUnusedEntry = 126;
3447 
3448 #define _ kStartTableUnusedEntry
3449 #define X kStartTableInvalidLine
3450 // Mark-bit to object start offset table.
3451 //
3452 // Each line is indexed by the mark bits of one byte.  The first number on
3453 // a line gives the number of live object starts encoded by that byte, and
3454 // the remaining numbers give the offsets (in words) of those object
3455 // starts.
3456 //
3457 // Since objects are at least two words long we don't have entries for two
3458 // consecutive 1 bits.  Every byte value above 170 contains consecutive 1 bits.
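//
// For example (a reading aid, not new data): byte 145 == 0b10010001 has
// mark bits at positions 0, 4 and 7, so its line below reads
//   3, 0, 4, 7, _,  // 145
// i.e. three objects start in these eight words, at word offsets 0, 4 and 7.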
3459 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3460   0, _, _, _, _,  // 0
3461   1, 0, _, _, _,  // 1
3462   1, 1, _, _, _,  // 2
3463   X, _, _, _, _,  // 3
3464   1, 2, _, _, _,  // 4
3465   2, 0, 2, _, _,  // 5
3466   X, _, _, _, _,  // 6
3467   X, _, _, _, _,  // 7
3468   1, 3, _, _, _,  // 8
3469   2, 0, 3, _, _,  // 9
3470   2, 1, 3, _, _,  // 10
3471   X, _, _, _, _,  // 11
3472   X, _, _, _, _,  // 12
3473   X, _, _, _, _,  // 13
3474   X, _, _, _, _,  // 14
3475   X, _, _, _, _,  // 15
3476   1, 4, _, _, _,  // 16
3477   2, 0, 4, _, _,  // 17
3478   2, 1, 4, _, _,  // 18
3479   X, _, _, _, _,  // 19
3480   2, 2, 4, _, _,  // 20
3481   3, 0, 2, 4, _,  // 21
3482   X, _, _, _, _,  // 22
3483   X, _, _, _, _,  // 23
3484   X, _, _, _, _,  // 24
3485   X, _, _, _, _,  // 25
3486   X, _, _, _, _,  // 26
3487   X, _, _, _, _,  // 27
3488   X, _, _, _, _,  // 28
3489   X, _, _, _, _,  // 29
3490   X, _, _, _, _,  // 30
3491   X, _, _, _, _,  // 31
3492   1, 5, _, _, _,  // 32
3493   2, 0, 5, _, _,  // 33
3494   2, 1, 5, _, _,  // 34
3495   X, _, _, _, _,  // 35
3496   2, 2, 5, _, _,  // 36
3497   3, 0, 2, 5, _,  // 37
3498   X, _, _, _, _,  // 38
3499   X, _, _, _, _,  // 39
3500   2, 3, 5, _, _,  // 40
3501   3, 0, 3, 5, _,  // 41
3502   3, 1, 3, 5, _,  // 42
3503   X, _, _, _, _,  // 43
3504   X, _, _, _, _,  // 44
3505   X, _, _, _, _,  // 45
3506   X, _, _, _, _,  // 46
3507   X, _, _, _, _,  // 47
3508   X, _, _, _, _,  // 48
3509   X, _, _, _, _,  // 49
3510   X, _, _, _, _,  // 50
3511   X, _, _, _, _,  // 51
3512   X, _, _, _, _,  // 52
3513   X, _, _, _, _,  // 53
3514   X, _, _, _, _,  // 54
3515   X, _, _, _, _,  // 55
3516   X, _, _, _, _,  // 56
3517   X, _, _, _, _,  // 57
3518   X, _, _, _, _,  // 58
3519   X, _, _, _, _,  // 59
3520   X, _, _, _, _,  // 60
3521   X, _, _, _, _,  // 61
3522   X, _, _, _, _,  // 62
3523   X, _, _, _, _,  // 63
3524   1, 6, _, _, _,  // 64
3525   2, 0, 6, _, _,  // 65
3526   2, 1, 6, _, _,  // 66
3527   X, _, _, _, _,  // 67
3528   2, 2, 6, _, _,  // 68
3529   3, 0, 2, 6, _,  // 69
3530   X, _, _, _, _,  // 70
3531   X, _, _, _, _,  // 71
3532   2, 3, 6, _, _,  // 72
3533   3, 0, 3, 6, _,  // 73
3534   3, 1, 3, 6, _,  // 74
3535   X, _, _, _, _,  // 75
3536   X, _, _, _, _,  // 76
3537   X, _, _, _, _,  // 77
3538   X, _, _, _, _,  // 78
3539   X, _, _, _, _,  // 79
3540   2, 4, 6, _, _,  // 80
3541   3, 0, 4, 6, _,  // 81
3542   3, 1, 4, 6, _,  // 82
3543   X, _, _, _, _,  // 83
3544   3, 2, 4, 6, _,  // 84
3545   4, 0, 2, 4, 6,  // 85
3546   X, _, _, _, _,  // 86
3547   X, _, _, _, _,  // 87
3548   X, _, _, _, _,  // 88
3549   X, _, _, _, _,  // 89
3550   X, _, _, _, _,  // 90
3551   X, _, _, _, _,  // 91
3552   X, _, _, _, _,  // 92
3553   X, _, _, _, _,  // 93
3554   X, _, _, _, _,  // 94
3555   X, _, _, _, _,  // 95
3556   X, _, _, _, _,  // 96
3557   X, _, _, _, _,  // 97
3558   X, _, _, _, _,  // 98
3559   X, _, _, _, _,  // 99
3560   X, _, _, _, _,  // 100
3561   X, _, _, _, _,  // 101
3562   X, _, _, _, _,  // 102
3563   X, _, _, _, _,  // 103
3564   X, _, _, _, _,  // 104
3565   X, _, _, _, _,  // 105
3566   X, _, _, _, _,  // 106
3567   X, _, _, _, _,  // 107
3568   X, _, _, _, _,  // 108
3569   X, _, _, _, _,  // 109
3570   X, _, _, _, _,  // 110
3571   X, _, _, _, _,  // 111
3572   X, _, _, _, _,  // 112
3573   X, _, _, _, _,  // 113
3574   X, _, _, _, _,  // 114
3575   X, _, _, _, _,  // 115
3576   X, _, _, _, _,  // 116
3577   X, _, _, _, _,  // 117
3578   X, _, _, _, _,  // 118
3579   X, _, _, _, _,  // 119
3580   X, _, _, _, _,  // 120
3581   X, _, _, _, _,  // 121
3582   X, _, _, _, _,  // 122
3583   X, _, _, _, _,  // 123
3584   X, _, _, _, _,  // 124
3585   X, _, _, _, _,  // 125
3586   X, _, _, _, _,  // 126
3587   X, _, _, _, _,  // 127
3588   1, 7, _, _, _,  // 128
3589   2, 0, 7, _, _,  // 129
3590   2, 1, 7, _, _,  // 130
3591   X, _, _, _, _,  // 131
3592   2, 2, 7, _, _,  // 132
3593   3, 0, 2, 7, _,  // 133
3594   X, _, _, _, _,  // 134
3595   X, _, _, _, _,  // 135
3596   2, 3, 7, _, _,  // 136
3597   3, 0, 3, 7, _,  // 137
3598   3, 1, 3, 7, _,  // 138
3599   X, _, _, _, _,  // 139
3600   X, _, _, _, _,  // 140
3601   X, _, _, _, _,  // 141
3602   X, _, _, _, _,  // 142
3603   X, _, _, _, _,  // 143
3604   2, 4, 7, _, _,  // 144
3605   3, 0, 4, 7, _,  // 145
3606   3, 1, 4, 7, _,  // 146
3607   X, _, _, _, _,  // 147
3608   3, 2, 4, 7, _,  // 148
3609   4, 0, 2, 4, 7,  // 149
3610   X, _, _, _, _,  // 150
3611   X, _, _, _, _,  // 151
3612   X, _, _, _, _,  // 152
3613   X, _, _, _, _,  // 153
3614   X, _, _, _, _,  // 154
3615   X, _, _, _, _,  // 155
3616   X, _, _, _, _,  // 156
3617   X, _, _, _, _,  // 157
3618   X, _, _, _, _,  // 158
3619   X, _, _, _, _,  // 159
3620   2, 5, 7, _, _,  // 160
3621   3, 0, 5, 7, _,  // 161
3622   3, 1, 5, 7, _,  // 162
3623   X, _, _, _, _,  // 163
3624   3, 2, 5, 7, _,  // 164
3625   4, 0, 2, 5, 7,  // 165
3626   X, _, _, _, _,  // 166
3627   X, _, _, _, _,  // 167
3628   3, 3, 5, 7, _,  // 168
3629   4, 0, 3, 5, 7,  // 169
3630   4, 1, 3, 5, 7   // 170
3631 };
3632 #undef _
3633 #undef X
3634 
3635 
3636 // Takes a word of mark bits.  Returns the number of objects that start in the
3637 // range.  Puts the word offsets of the object starts in the supplied array.
3638 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3639   int objects = 0;
3640   int offset = 0;
3641 
3642   // No consecutive 1 bits.
3643   ASSERT((mark_bits & 0x180) != 0x180);
3644   ASSERT((mark_bits & 0x18000) != 0x18000);
3645   ASSERT((mark_bits & 0x1800000) != 0x1800000);
3646 
3647   while (mark_bits != 0) {
3648     int byte = (mark_bits & 0xff);
3649     mark_bits >>= 8;
3650     if (byte != 0) {
3651       ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
3652       char* table = kStartTable + byte * kStartTableEntriesPerLine;
3653       int objects_in_these_8_words = table[0];
3654       ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3655       ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3656       for (int i = 0; i < objects_in_these_8_words; i++) {
3657         starts[objects++] = offset + table[1 + i];
3658       }
3659     }
3660     offset += 8;
3661   }
3662   return objects;
3663 }
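// A worked example (hypothetical input): mark_bits == 0x911 has bits 0, 4,
// 8 and 11 set.  Byte 0 is 0x11 (table line 17: "2, 0, 4") and byte 1 is
// 0x09 (table line 9: "2, 0, 3"), so starts[] receives
// {0, 4, 8 + 0, 8 + 3} == {0, 4, 8, 11} and the function returns 4.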
3664 
3665 
3666 static inline Address DigestFreeStart(Address approximate_free_start,
3667                                       uint32_t free_start_cell) {
3668   ASSERT(free_start_cell != 0);
3669 
3670   // No consecutive 1 bits.
3671   ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3672 
3673   int offsets[16];
3674   uint32_t cell = free_start_cell;
3675   int offset_of_last_live;
3676   if ((cell & 0x80000000u) != 0) {
3677     // This case would overflow below.
3678     offset_of_last_live = 31;
3679   } else {
3680     // Remove all but one bit, the most significant.  This is an optimization
3681     // that may or may not be worthwhile.
3682     cell |= cell >> 16;
3683     cell |= cell >> 8;
3684     cell |= cell >> 4;
3685     cell |= cell >> 2;
3686     cell |= cell >> 1;
3687     cell = (cell + 1) >> 1;
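    // E.g. (illustration) cell == 0b00010010 smears to 0b00011111, and
    // (cell + 1) >> 1 then leaves 0b00010000: only the topmost set bit
    // survives.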
3688     int live_objects = MarkWordToObjectStarts(cell, offsets);
3689     ASSERT(live_objects == 1);
3690     offset_of_last_live = offsets[live_objects - 1];
3691   }
3692   Address last_live_start =
3693       approximate_free_start + offset_of_last_live * kPointerSize;
3694   HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3695   Address free_start = last_live_start + last_live->Size();
3696   return free_start;
3697 }
3698 
3699 
3700 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3701   ASSERT(cell != 0);
3702 
3703   // No consecutive 1 bits.
3704   ASSERT((cell & (cell << 1)) == 0);
3705 
3706   int offsets[16];
3707   if (cell == 0x80000000u) {  // Avoid overflow below.
3708     return block_address + 31 * kPointerSize;
3709   }
3710   uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
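  // E.g. (illustration) cell == 0b01101000: cell - 1 == 0b01100111, the XOR
  // leaves 0b00001111, and (+1, >> 1) isolates 0b00001000, the lowest set
  // bit of the original cell.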
3711   ASSERT((first_set_bit & cell) == first_set_bit);
3712   int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3713   ASSERT(live_objects == 1);
3714   USE(live_objects);
3715   return block_address + offsets[0] * kPointerSize;
3716 }
3717 
3718 
3719 // Sweeps a space conservatively.  After this has been done the larger free
3720 // spaces have been put on the free list and the smaller ones have been
3721 // ignored and left untouched.  A free space is always either ignored or put
3722 // on the free list, never split up into two parts.  This is important
3723 // because it means that any FreeSpace maps left actually describe a region of
3724 // memory that can be ignored when scanning.  Dead objects other than free
3725 // spaces will not contain the free space map.
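// For instance, a dead gap of fewer than about 32 words may simply be left
// untouched, keeping its old (dead) contents, while any gap that is passed
// to Free() is passed in its entirety.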
3726 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
3727   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3728   MarkBit::CellType* cells = p->markbits()->cells();
3729   p->MarkSweptConservatively();
3730 
3731   int last_cell_index =
3732       Bitmap::IndexToCell(
3733           Bitmap::CellAlignIndex(
3734               p->AddressToMarkbitIndex(p->area_end())));
3735 
3736   int cell_index =
3737       Bitmap::IndexToCell(
3738           Bitmap::CellAlignIndex(
3739               p->AddressToMarkbitIndex(p->area_start())));
3740 
3741   intptr_t freed_bytes = 0;
3742 
3743   // This is the start of the 32-word block that we are currently looking at.
3744   Address block_address = p->area_start();
3745 
3746   // Skip over all the dead objects at the start of the page and mark them free.
3747   for (;
3748        cell_index < last_cell_index;
3749        cell_index++, block_address += 32 * kPointerSize) {
3750     if (cells[cell_index] != 0) break;
3751   }
3752   size_t size = block_address - p->area_start();
3753   if (cell_index == last_cell_index) {
3754     freed_bytes += static_cast<int>(space->Free(p->area_start(),
3755                                                 static_cast<int>(size)));
3756     ASSERT_EQ(0, p->LiveBytes());
3757     return freed_bytes;
3758   }
3759   // Grow the size of the start-of-page free space a little to get up to the
3760   // first live object.
3761   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
3762   // Free the first free space.
3763   size = free_end - p->area_start();
3764   freed_bytes += space->Free(p->area_start(),
3765                              static_cast<int>(size));
3766   // The start of the current free area is represented in undigested form by
3767   // the address of the last 32-word section that contained a live object and
3768   // the marking bitmap for that cell, which describes where the live object
3769   // started.  Unless we find a large free space in the bitmap we will not
3770   // digest this pair into a real address.  We start the iteration here at the
3771   // first word in the marking bit map that indicates a live object.
3772   Address free_start = block_address;
3773   uint32_t free_start_cell = cells[cell_index];
3774 
3775   for ( ;
3776        cell_index < last_cell_index;
3777        cell_index++, block_address += 32 * kPointerSize) {
3778     ASSERT((unsigned)cell_index ==
3779         Bitmap::IndexToCell(
3780             Bitmap::CellAlignIndex(
3781                 p->AddressToMarkbitIndex(block_address))));
3782     uint32_t cell = cells[cell_index];
3783     if (cell != 0) {
3784       // We have a live object.  Check approximately whether it is more than 32
3785       // words since the last live object.
3786       if (block_address - free_start > 32 * kPointerSize) {
3787         free_start = DigestFreeStart(free_start, free_start_cell);
3788         if (block_address - free_start > 32 * kPointerSize) {
3789           // Now that we know the exact start of the free space, it still
3790           // looks like we have a large enough free space to be worth
3791           // bothering with, so now we need to find the start of the first
3792           // live object at the end of the free space.
3793           free_end = StartOfLiveObject(block_address, cell);
3794           freed_bytes += space->Free(free_start,
3795                                      static_cast<int>(free_end - free_start));
3796         }
3797       }
3798       // Update our undigested record of where the current free area started.
3799       free_start = block_address;
3800       free_start_cell = cell;
3801       // Clear marking bits for current cell.
3802       cells[cell_index] = 0;
3803     }
3804   }
3805 
3806   // Handle the free space at the end of the page.
3807   if (block_address - free_start > 32 * kPointerSize) {
3808     free_start = DigestFreeStart(free_start, free_start_cell);
3809     freed_bytes += space->Free(free_start,
3810                                static_cast<int>(block_address - free_start));
3811   }
3812 
3813   p->ResetLiveBytes();
3814   return freed_bytes;
3815 }
3816 
3817 
3818 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3819   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3820                                       sweeper == LAZY_CONSERVATIVE);
3821 
3822   space->ClearStats();
3823 
3824   PageIterator it(space);
3825 
3826   intptr_t freed_bytes = 0;
3827   int pages_swept = 0;
3828   intptr_t newspace_size = space->heap()->new_space()->Size();
3829   bool lazy_sweeping_active = false;
3830   bool unused_page_present = false;
3831 
3832   intptr_t old_space_size = heap()->PromotedSpaceSize();
3833   intptr_t space_left =
3834       Min(heap()->OldGenPromotionLimit(old_space_size),
3835           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
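  // Heuristic (see the LAZY_CONSERVATIVE case below): sweep eagerly until
  // the freed bytes plus the remaining old-generation headroom could absorb
  // roughly a full new space worth of promotions, then switch to lazy
  // sweeping.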
3836 
3837   while (it.has_next()) {
3838     Page* p = it.next();
3839 
3840     // Clear sweeping flags to indicate that the marking bits are still intact.
3841     p->ClearSweptPrecisely();
3842     p->ClearSweptConservatively();
3843 
3844     if (p->IsEvacuationCandidate()) {
3845       ASSERT(evacuation_candidates_.length() > 0);
3846       continue;
3847     }
3848 
3849     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3850       // Will be processed in EvacuateNewSpaceAndCandidates.
3851       continue;
3852     }
3853 
3854     // One unused page is kept; all further unused pages are released unswept.
3855     if (p->LiveBytes() == 0) {
3856       if (unused_page_present) {
3857         if (FLAG_gc_verbose) {
3858           PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3859                  reinterpret_cast<intptr_t>(p));
3860         }
3861         // Adjust unswept free bytes because releasing a page expects said
3862         // counter to be accurate for unswept pages.
3863         space->IncreaseUnsweptFreeBytes(p);
3864         space->ReleasePage(p);
3865         continue;
3866       }
3867       unused_page_present = true;
3868     }
3869 
3870     if (lazy_sweeping_active) {
3871       if (FLAG_gc_verbose) {
3872         PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
3873                reinterpret_cast<intptr_t>(p));
3874       }
3875       space->IncreaseUnsweptFreeBytes(p);
3876       continue;
3877     }
3878 
3879     switch (sweeper) {
3880       case CONSERVATIVE: {
3881         if (FLAG_gc_verbose) {
3882           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
3883                  reinterpret_cast<intptr_t>(p));
3884         }
3885         SweepConservatively(space, p);
3886         pages_swept++;
3887         break;
3888       }
3889       case LAZY_CONSERVATIVE: {
3890         if (FLAG_gc_verbose) {
3891           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
3892                  reinterpret_cast<intptr_t>(p));
3893         }
3894         freed_bytes += SweepConservatively(space, p);
3895         pages_swept++;
3896         if (space_left + freed_bytes > newspace_size) {
3897           space->SetPagesToSweep(p->next_page());
3898           lazy_sweeping_active = true;
3899         } else {
3900           if (FLAG_gc_verbose) {
3901             PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
3902                    freed_bytes);
3903           }
3904         }
3905         break;
3906       }
3907       case PRECISE: {
3908         if (FLAG_gc_verbose) {
3909           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
3910                  reinterpret_cast<intptr_t>(p));
3911         }
3912         if (space->identity() == CODE_SPACE) {
3913           SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
3914         } else {
3915           SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
3916         }
3917         pages_swept++;
3918         break;
3919       }
3920       default: {
3921         UNREACHABLE();
3922       }
3923     }
3924   }
3925 
3926   if (FLAG_gc_verbose) {
3927     PrintF("SweepSpace: %s (%d pages swept)\n",
3928            AllocationSpaceName(space->identity()),
3929            pages_swept);
3930   }
3931 
3932   // Give pages that are queued to be freed back to the OS.
3933   heap()->FreeQueuedChunks();
3934 }
3935 
3936 
3937 void MarkCompactCollector::SweepSpaces() {
3938   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
3939 #ifdef DEBUG
3940   state_ = SWEEP_SPACES;
3941 #endif
3942   SweeperType how_to_sweep =
3943       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3944   if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
3945   if (sweep_precisely_) how_to_sweep = PRECISE;
3946   // Noncompacting collections simply sweep the spaces to clear the mark
3947   // bits and free the nonlive blocks (for old and map spaces).  We sweep
3948   // the map space last because freeing non-live maps overwrites them and
3949   // the other spaces rely on possibly non-live maps to get the sizes for
3950   // non-live objects.
3951   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3952   SweepSpace(heap()->old_data_space(), how_to_sweep);
3953 
3954   RemoveDeadInvalidatedCode();
3955   SweepSpace(heap()->code_space(), PRECISE);
3956 
3957   SweepSpace(heap()->cell_space(), PRECISE);
3958 
3959   EvacuateNewSpaceAndCandidates();
3960 
3961   // ClearNonLiveTransitions depends on precise sweeping of map space to
3962   // detect whether an unmarked map became dead in this collection or in one
3963   // of the previous ones.
3964   SweepSpace(heap()->map_space(), PRECISE);
3965 
3966   // Deallocate unmarked objects and clear marked bits for marked objects.
3967   heap_->lo_space()->FreeUnmarkedObjects();
3968 }
3969 
3970 
3971 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
3972   if (enable) {
3973     if (code_flusher_ != NULL) return;
3974     code_flusher_ = new CodeFlusher(heap()->isolate());
3975   } else {
3976     if (code_flusher_ == NULL) return;
3977     delete code_flusher_;
3978     code_flusher_ = NULL;
3979   }
3980 }
3981 
3982 
3983 // TODO(1466) ReportDeleteIfNeeded is not called currently.
3984 // Our profiling tools do not expect intersections between
3985 // code objects. We should either reenable it or change our tools.
3986 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
3987                                                 Isolate* isolate) {
3988 #ifdef ENABLE_GDB_JIT_INTERFACE
3989   if (obj->IsCode()) {
3990     GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3991   }
3992 #endif
3993   if (obj->IsCode()) {
3994     PROFILE(isolate, CodeDeleteEvent(obj->address()));
3995   }
3996 }
3997 
3998 
3999 void MarkCompactCollector::Initialize() {
4000   StaticMarkingVisitor::Initialize();
4001 }
4002 
4003 
4004 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4005   return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4006 }
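// A note on the encoding this relies on: AddTo (below) records a typed slot
// as two consecutive entries, a small SlotType value followed by the slot's
// address.  No valid slot address is below NUMBER_OF_SLOT_TYPES, so the
// comparison above distinguishes the two kinds of entry.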
4007 
4008 
4009 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4010                         SlotsBuffer** buffer_address,
4011                         SlotType type,
4012                         Address addr,
4013                         AdditionMode mode) {
4014   SlotsBuffer* buffer = *buffer_address;
4015   if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4016     if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4017       allocator->DeallocateChain(buffer_address);
4018       return false;
4019     }
4020     buffer = allocator->AllocateBuffer(buffer);
4021     *buffer_address = buffer;
4022   }
4023   ASSERT(buffer->HasSpaceForTypedSlot());
4024   buffer->Add(reinterpret_cast<ObjectSlot>(type));
4025   buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4026   return true;
4027 }
4028 
4029 
4030 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4031   if (RelocInfo::IsCodeTarget(rmode)) {
4032     return SlotsBuffer::CODE_TARGET_SLOT;
4033   } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4034     return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4035   } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4036     return SlotsBuffer::DEBUG_TARGET_SLOT;
4037   } else if (RelocInfo::IsJSReturn(rmode)) {
4038     return SlotsBuffer::JS_RETURN_SLOT;
4039   }
4040   UNREACHABLE();
4041   return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4042 }
4043 
4044 
4045 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4046   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4047   if (target_page->IsEvacuationCandidate() &&
4048       (rinfo->host() == NULL ||
4049        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4050     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4051                             target_page->slots_buffer_address(),
4052                             SlotTypeForRMode(rinfo->rmode()),
4053                             rinfo->pc(),
4054                             SlotsBuffer::FAIL_ON_OVERFLOW)) {
4055       EvictEvacuationCandidate(target_page);
4056     }
4057   }
4058 }
4059 
4060 
4061 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4062   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4063   if (target_page->IsEvacuationCandidate() &&
4064       !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4065     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4066                             target_page->slots_buffer_address(),
4067                             SlotsBuffer::CODE_ENTRY_SLOT,
4068                             slot,
4069                             SlotsBuffer::FAIL_ON_OVERFLOW)) {
4070       EvictEvacuationCandidate(target_page);
4071     }
4072   }
4073 }
4074 
4075 
4076 static inline SlotsBuffer::SlotType DecodeSlotType(
4077     SlotsBuffer::ObjectSlot slot) {
4078   return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4079 }
4080 
4081 
4082 void SlotsBuffer::UpdateSlots(Heap* heap) {
4083   PointersUpdatingVisitor v(heap);
4084 
4085   for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4086     ObjectSlot slot = slots_[slot_idx];
4087     if (!IsTypedSlot(slot)) {
4088       PointersUpdatingVisitor::UpdateSlot(heap, slot);
4089     } else {
4090       ++slot_idx;
4091       ASSERT(slot_idx < idx_);
4092       UpdateSlot(&v,
4093                  DecodeSlotType(slot),
4094                  reinterpret_cast<Address>(slots_[slot_idx]));
4095     }
4096   }
4097 }
4098 
4099 
4100 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4101   PointersUpdatingVisitor v(heap);
4102 
4103   for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4104     ObjectSlot slot = slots_[slot_idx];
4105     if (!IsTypedSlot(slot)) {
4106       if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4107         PointersUpdatingVisitor::UpdateSlot(heap, slot);
4108       }
4109     } else {
4110       ++slot_idx;
4111       ASSERT(slot_idx < idx_);
4112       Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4113       if (!IsOnInvalidatedCodeObject(pc)) {
4114         UpdateSlot(&v,
4115                    DecodeSlotType(slot),
4116                    reinterpret_cast<Address>(slots_[slot_idx]));
4117       }
4118     }
4119   }
4120 }
4121 
4122 
4123 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4124   return new SlotsBuffer(next_buffer);
4125 }
4126 
4127 
4128 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4129   delete buffer;
4130 }
4131 
4132 
4133 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4134   SlotsBuffer* buffer = *buffer_address;
4135   while (buffer != NULL) {
4136     SlotsBuffer* next_buffer = buffer->next();
4137     DeallocateBuffer(buffer);
4138     buffer = next_buffer;
4139   }
4140   *buffer_address = NULL;
4141 }
4142 
4143 
4144 } }  // namespace v8::internal
4145