1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "compilation-cache.h"
31 #include "execution.h"
32 #include "heap-profiler.h"
33 #include "gdb-jit.h"
34 #include "global-handles.h"
35 #include "ic-inl.h"
36 #include "liveobjectlist-inl.h"
37 #include "mark-compact.h"
38 #include "objects-visiting.h"
39 #include "stub-cache.h"
40 
41 namespace v8 {
42 namespace internal {
43 
44 // -------------------------------------------------------------------------
45 // MarkCompactCollector
46 
47 MarkCompactCollector::MarkCompactCollector() :  // NOLINT
48 #ifdef DEBUG
49       state_(IDLE),
50 #endif
51       force_compaction_(false),
52       compacting_collection_(false),
53       compact_on_next_gc_(false),
54       previous_marked_count_(0),
55       tracer_(NULL),
56 #ifdef DEBUG
57       live_young_objects_size_(0),
58       live_old_pointer_objects_size_(0),
59       live_old_data_objects_size_(0),
60       live_code_objects_size_(0),
61       live_map_objects_size_(0),
62       live_cell_objects_size_(0),
63       live_lo_objects_size_(0),
64       live_bytes_(0),
65 #endif
66       heap_(NULL),
67       code_flusher_(NULL) { }
68 
69 
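// Overview of CollectGarbage below: mark live objects, optionally clear dead
// map transitions, sweep the large object space, then either compact (encode
// forwarding addresses, update pointers, relocate objects) or sweep the paged
// spaces in place, and finally run Finish().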
70 void MarkCompactCollector::CollectGarbage() {
71   // Make sure that Prepare() has been called. The individual steps below will
72   // update the state as they proceed.
73   ASSERT(state_ == PREPARE_GC);
74 
75   // Prepare has selected whether to compact the old generation or not.
76   // Tell the tracer.
77   if (IsCompacting()) tracer_->set_is_compacting();
78 
79   MarkLiveObjects();
80 
81   if (FLAG_collect_maps) ClearNonLiveTransitions();
82 
83   SweepLargeObjectSpace();
84 
85   if (IsCompacting()) {
86     GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
87     EncodeForwardingAddresses();
88 
89     heap()->MarkMapPointersAsEncoded(true);
90     UpdatePointers();
91     heap()->MarkMapPointersAsEncoded(false);
92     heap()->isolate()->pc_to_code_cache()->Flush();
93 
94     RelocateObjects();
95   } else {
96     SweepSpaces();
97     heap()->isolate()->pc_to_code_cache()->Flush();
98   }
99 
100   Finish();
101 
102   // Save the count of marked objects remaining after the collection and
103   // null out the GC tracer.
104   previous_marked_count_ = tracer_->marked_count();
105   ASSERT(previous_marked_count_ == 0);
106   tracer_ = NULL;
107 }
108 
109 
110 void MarkCompactCollector::Prepare(GCTracer* tracer) {
111   // Rather than passing the tracer around we stash it in a member
112   // variable.
113   tracer_ = tracer;
114 
115 #ifdef DEBUG
116   ASSERT(state_ == IDLE);
117   state_ = PREPARE_GC;
118 #endif
119   ASSERT(!FLAG_always_compact || !FLAG_never_compact);
120 
121   compacting_collection_ =
122       FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
123   compact_on_next_gc_ = false;
124 
125   if (FLAG_never_compact) compacting_collection_ = false;
126   if (!heap()->map_space()->MapPointersEncodable())
127       compacting_collection_ = false;
128   if (FLAG_collect_maps) CreateBackPointers();
129 #ifdef ENABLE_GDB_JIT_INTERFACE
130   if (FLAG_gdbjit) {
131     // If GDBJIT interface is active disable compaction.
132     compacting_collection_ = false;
133   }
134 #endif
135 
136   PagedSpaces spaces;
137   for (PagedSpace* space = spaces.next();
138        space != NULL; space = spaces.next()) {
139     space->PrepareForMarkCompact(compacting_collection_);
140   }
141 
142 #ifdef DEBUG
143   live_bytes_ = 0;
144   live_young_objects_size_ = 0;
145   live_old_pointer_objects_size_ = 0;
146   live_old_data_objects_size_ = 0;
147   live_code_objects_size_ = 0;
148   live_map_objects_size_ = 0;
149   live_cell_objects_size_ = 0;
150   live_lo_objects_size_ = 0;
151 #endif
152 }
153 
154 
155 void MarkCompactCollector::Finish() {
156 #ifdef DEBUG
157   ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
158   state_ = IDLE;
159 #endif
160   // The stub cache is not traversed during GC; clear the cache to
161   // force lazy re-initialization of it. This must be done after the
162   // GC, because it relies on the new address of certain old space
163   // objects (empty string, illegal builtin).
164   heap()->isolate()->stub_cache()->Clear();
165 
166   heap()->external_string_table_.CleanUp();
167 
168   // If we've just compacted old space there's no reason to check the
169   // fragmentation limit. Just return.
170   if (HasCompacted()) return;
171 
172   // We compact the old generation on the next GC if it has gotten too
173   // fragmented (ie, we could recover an expected amount of space by
174   // reclaiming the waste and free list blocks).
175   static const int kFragmentationLimit = 15;        // Percent.
176   static const int kFragmentationAllowed = 1 * MB;  // Absolute.
177   intptr_t old_gen_recoverable = 0;
178   intptr_t old_gen_used = 0;
179 
180   OldSpaces spaces;
181   for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
182     old_gen_recoverable += space->Waste() + space->AvailableFree();
183     old_gen_used += space->Size();
184   }
185 
186   int old_gen_fragmentation =
187       static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
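  // For example, with old_gen_used at 20 MB and old_gen_recoverable at 4 MB,
  // old_gen_fragmentation is 20, which exceeds the 15% limit, and 4 MB is
  // above the 1 MB floor, so compaction is requested for the next GC.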
188   if (old_gen_fragmentation > kFragmentationLimit &&
189       old_gen_recoverable > kFragmentationAllowed) {
190     compact_on_next_gc_ = true;
191   }
192 }
193 
194 
195 // -------------------------------------------------------------------------
196 // Phase 1: tracing and marking live objects.
197 //   before: all objects are in normal state.
198 //   after: a live object's map pointer is marked as '00'.
199 
200 // Marking all live objects in the heap as part of mark-sweep or mark-compact
201 // collection.  Before marking, all objects are in their normal state.  After
202 // marking, live objects' map pointers are marked indicating that the object
203 // has been found reachable.
204 //
205 // The marking algorithm is a (mostly) depth-first (because of possible stack
206 // overflow) traversal of the graph of objects reachable from the roots.  It
207 // uses an explicit stack of pointers rather than recursion.  The young
208 // generation's inactive ('from') space is used as a marking stack.  The
209 // objects in the marking stack are the ones that have been reached and marked
210 // but their children have not yet been visited.
211 //
212 // The marking stack can overflow during traversal.  In that case, we set an
213 // overflow flag.  When the overflow flag is set, we continue marking objects
214 // reachable from the objects on the marking stack, but no longer push them on
215 // the marking stack.  Instead, we mark them as both marked and overflowed.
216 // When the stack is in the overflowed state, objects marked as overflowed
217 // have been reached and marked but their children have not been visited yet.
218 // After emptying the marking stack, we clear the overflow flag and traverse
219 // the heap looking for objects marked as overflowed, push them on the stack,
220 // and continue with marking.  This process repeats until all reachable
221 // objects have been marked.
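// In code, this is the ProcessMarkingStack() loop below: EmptyMarkingStack()
// drains the stack, and while the overflow flag is set, RefillMarkingStack()
// rescans the spaces for overflowed objects and the stack is drained again.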
222 
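// CodeFlusher gathers JSFunction and SharedFunctionInfo candidates during
// marking; ProcessCandidates() later resets any candidate whose code was not
// marked to the lazy-compile stub so the code object can be collected.
// Candidates are threaded into intrusive lists through otherwise unused
// fields (the code entry slot for functions, the code header padding for
// shared function infos), so no extra allocation is needed during GC.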
223 class CodeFlusher {
224  public:
225   explicit CodeFlusher(Isolate* isolate)
226       : isolate_(isolate),
227         jsfunction_candidates_head_(NULL),
228         shared_function_info_candidates_head_(NULL) {}
229 
230   void AddCandidate(SharedFunctionInfo* shared_info) {
231     SetNextCandidate(shared_info, shared_function_info_candidates_head_);
232     shared_function_info_candidates_head_ = shared_info;
233   }
234 
235   void AddCandidate(JSFunction* function) {
236     ASSERT(function->unchecked_code() ==
237            function->unchecked_shared()->unchecked_code());
238 
239     SetNextCandidate(function, jsfunction_candidates_head_);
240     jsfunction_candidates_head_ = function;
241   }
242 
243   void ProcessCandidates() {
244     ProcessSharedFunctionInfoCandidates();
245     ProcessJSFunctionCandidates();
246   }
247 
248  private:
249   void ProcessJSFunctionCandidates() {
250     Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
251 
252     JSFunction* candidate = jsfunction_candidates_head_;
253     JSFunction* next_candidate;
254     while (candidate != NULL) {
255       next_candidate = GetNextCandidate(candidate);
256 
257       SharedFunctionInfo* shared = candidate->unchecked_shared();
258 
259       Code* code = shared->unchecked_code();
260       if (!code->IsMarked()) {
261         shared->set_code(lazy_compile);
262         candidate->set_code(lazy_compile);
263       } else {
264         candidate->set_code(shared->unchecked_code());
265       }
266 
267       candidate = next_candidate;
268     }
269 
270     jsfunction_candidates_head_ = NULL;
271   }
272 
273 
274   void ProcessSharedFunctionInfoCandidates() {
275     Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
276 
277     SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
278     SharedFunctionInfo* next_candidate;
279     while (candidate != NULL) {
280       next_candidate = GetNextCandidate(candidate);
281       SetNextCandidate(candidate, NULL);
282 
283       Code* code = candidate->unchecked_code();
284       if (!code->IsMarked()) {
285         candidate->set_code(lazy_compile);
286       }
287 
288       candidate = next_candidate;
289     }
290 
291     shared_function_info_candidates_head_ = NULL;
292   }
293 
294   static JSFunction** GetNextCandidateField(JSFunction* candidate) {
295     return reinterpret_cast<JSFunction**>(
296         candidate->address() + JSFunction::kCodeEntryOffset);
297   }
298 
299   static JSFunction* GetNextCandidate(JSFunction* candidate) {
300     return *GetNextCandidateField(candidate);
301   }
302 
303   static void SetNextCandidate(JSFunction* candidate,
304                                JSFunction* next_candidate) {
305     *GetNextCandidateField(candidate) = next_candidate;
306   }
307 
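  // The SharedFunctionInfo candidate link is stored in the padding at the
  // start of its code object's header, so a pointer must fit there.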
308   STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
309 
310   static SharedFunctionInfo** GetNextCandidateField(
311       SharedFunctionInfo* candidate) {
312     Code* code = candidate->unchecked_code();
313     return reinterpret_cast<SharedFunctionInfo**>(
314         code->address() + Code::kHeaderPaddingStart);
315   }
316 
317   static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
318     return *GetNextCandidateField(candidate);
319   }
320 
321   static void SetNextCandidate(SharedFunctionInfo* candidate,
322                                SharedFunctionInfo* next_candidate) {
323     *GetNextCandidateField(candidate) = next_candidate;
324   }
325 
326   Isolate* isolate_;
327   JSFunction* jsfunction_candidates_head_;
328   SharedFunctionInfo* shared_function_info_candidates_head_;
329 
330   DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
331 };
332 
333 
334 MarkCompactCollector::~MarkCompactCollector() {
335   if (code_flusher_ != NULL) {
336     delete code_flusher_;
337     code_flusher_ = NULL;
338   }
339 }
340 
341 
342 static inline HeapObject* ShortCircuitConsString(Object** p) {
343   // Optimization: If the heap object pointed to by p is a non-symbol
344   // cons string whose right substring is HEAP->empty_string, update
345   // it in place to its left substring.  Return the updated value.
346   //
347   // Here we assume that if we change *p, we replace it with a heap object
348   // (ie, the left substring of a cons string is always a heap object).
349   //
350   // The check performed is:
351   //   object->IsConsString() && !object->IsSymbol() &&
352   //   (ConsString::cast(object)->second() == HEAP->empty_string())
353   // except the maps for the object and its possible substrings might be
354   // marked.
355   HeapObject* object = HeapObject::cast(*p);
356   MapWord map_word = object->map_word();
357   map_word.ClearMark();
358   InstanceType type = map_word.ToMap()->instance_type();
359   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
360 
361   Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
362   Heap* heap = map_word.ToMap()->heap();
363   if (second != heap->raw_unchecked_empty_string()) {
364     return object;
365   }
366 
367   // Since we don't have the object's start, it is impossible to update the
368   // page dirty marks. Therefore, we only replace the string with its left
369   // substring when page dirty marks do not change.
370   Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
371   if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
372 
373   *p = first;
374   return HeapObject::cast(first);
375 }
376 
377 
378 class StaticMarkingVisitor : public StaticVisitorBase {
379  public:
380   static inline void IterateBody(Map* map, HeapObject* obj) {
381     table_.GetVisitor(map)(map, obj);
382   }
383 
384   static void Initialize() {
385     table_.Register(kVisitShortcutCandidate,
386                     &FixedBodyVisitor<StaticMarkingVisitor,
387                                       ConsString::BodyDescriptor,
388                                       void>::Visit);
389 
390     table_.Register(kVisitConsString,
391                     &FixedBodyVisitor<StaticMarkingVisitor,
392                                       ConsString::BodyDescriptor,
393                                       void>::Visit);
394 
395 
396     table_.Register(kVisitFixedArray,
397                     &FlexibleBodyVisitor<StaticMarkingVisitor,
398                                          FixedArray::BodyDescriptor,
399                                          void>::Visit);
400 
401     table_.Register(kVisitGlobalContext,
402                     &FixedBodyVisitor<StaticMarkingVisitor,
403                                       Context::MarkCompactBodyDescriptor,
404                                       void>::Visit);
405 
406     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
407     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
408     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
409 
410     table_.Register(kVisitOddball,
411                     &FixedBodyVisitor<StaticMarkingVisitor,
412                                       Oddball::BodyDescriptor,
413                                       void>::Visit);
414     table_.Register(kVisitMap,
415                     &FixedBodyVisitor<StaticMarkingVisitor,
416                                       Map::BodyDescriptor,
417                                       void>::Visit);
418 
419     table_.Register(kVisitCode, &VisitCode);
420 
421     table_.Register(kVisitSharedFunctionInfo,
422                     &VisitSharedFunctionInfoAndFlushCode);
423 
424     table_.Register(kVisitJSFunction,
425                     &VisitJSFunctionAndFlushCode);
426 
427     table_.Register(kVisitPropertyCell,
428                     &FixedBodyVisitor<StaticMarkingVisitor,
429                                       JSGlobalPropertyCell::BodyDescriptor,
430                                       void>::Visit);
431 
432     table_.RegisterSpecializations<DataObjectVisitor,
433                                    kVisitDataObject,
434                                    kVisitDataObjectGeneric>();
435 
436     table_.RegisterSpecializations<JSObjectVisitor,
437                                    kVisitJSObject,
438                                    kVisitJSObjectGeneric>();
439 
440     table_.RegisterSpecializations<StructObjectVisitor,
441                                    kVisitStruct,
442                                    kVisitStructGeneric>();
443   }
444 
445   INLINE(static void VisitPointer(Heap* heap, Object** p)) {
446     MarkObjectByPointer(heap, p);
447   }
448 
449   INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
450     // Mark all objects pointed to in [start, end).
451     const int kMinRangeForMarkingRecursion = 64;
452     if (end - start >= kMinRangeForMarkingRecursion) {
453       if (VisitUnmarkedObjects(heap, start, end)) return;
454       // We are close to a stack overflow, so just mark the objects.
455     }
456     for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
457   }
458 
459   static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
460     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
461     Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
462     if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
463       IC::Clear(rinfo->pc());
464       // Please note targets for cleared inline caches do not have to be
465       // marked since they are contained in HEAP->non_monomorphic_cache().
466     } else {
467       heap->mark_compact_collector()->MarkObject(code);
468     }
469   }
470 
471   static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
472     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
473     Object* cell = rinfo->target_cell();
474     Object* old_cell = cell;
475     VisitPointer(heap, &cell);
476     if (cell != old_cell) {
477       rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
478     }
479   }
480 
481   static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
482     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
483             rinfo->IsPatchedReturnSequence()) ||
484            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
485             rinfo->IsPatchedDebugBreakSlotSequence()));
486     HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
487     heap->mark_compact_collector()->MarkObject(code);
488   }
489 
490   // Mark object pointed to by p.
491   INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
492     if (!(*p)->IsHeapObject()) return;
493     HeapObject* object = ShortCircuitConsString(p);
494     if (!object->IsMarked()) {
495       heap->mark_compact_collector()->MarkUnmarkedObject(object);
496     }
497   }
498 
499 
500   // Visit an unmarked object.
501   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
502                                          HeapObject* obj)) {
503 #ifdef DEBUG
504     ASSERT(Isolate::Current()->heap()->Contains(obj));
505     ASSERT(!obj->IsMarked());
506 #endif
507     Map* map = obj->map();
508     collector->SetMark(obj);
509     // Mark the map pointer and the body.
510     if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
511     IterateBody(map, obj);
512   }
513 
514   // Visit all unmarked objects pointed to by [start, end).
515   // Returns false if the operation fails (lack of stack space).
516   static inline bool VisitUnmarkedObjects(Heap* heap,
517                                           Object** start,
518                                           Object** end) {
519     // Return false if we are close to the stack limit.
520     StackLimitCheck check(heap->isolate());
521     if (check.HasOverflowed()) return false;
522 
523     MarkCompactCollector* collector = heap->mark_compact_collector();
524     // Visit the unmarked objects.
525     for (Object** p = start; p < end; p++) {
526       if (!(*p)->IsHeapObject()) continue;
527       HeapObject* obj = HeapObject::cast(*p);
528       if (obj->IsMarked()) continue;
529       VisitUnmarkedObject(collector, obj);
530     }
531     return true;
532   }
533 
534   static inline void VisitExternalReference(Address* p) { }
535   static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
536 
537  private:
538   class DataObjectVisitor {
539    public:
540     template<int size>
541     static void VisitSpecialized(Map* map, HeapObject* object) {
542     }
543 
544     static void Visit(Map* map, HeapObject* object) {
545     }
546   };
547 
548   typedef FlexibleBodyVisitor<StaticMarkingVisitor,
549                               JSObject::BodyDescriptor,
550                               void> JSObjectVisitor;
551 
552   typedef FlexibleBodyVisitor<StaticMarkingVisitor,
553                               StructBodyDescriptor,
554                               void> StructObjectVisitor;
555 
556   static void VisitCode(Map* map, HeapObject* object) {
557     reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
558         map->heap());
559   }
560 
561   // Code flushing support.
562 
563   // How many collections a newly compiled code object will survive before being
564   // flushed.
565   static const int kCodeAgeThreshold = 5;
566 
567   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
568     Object* undefined = heap->raw_unchecked_undefined_value();
569     return (info->script() != undefined) &&
570         (reinterpret_cast<Script*>(info->script())->source() != undefined);
571   }
572 
573 
574   inline static bool IsCompiled(JSFunction* function) {
575     return function->unchecked_code() !=
576         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
577   }
578 
579   inline static bool IsCompiled(SharedFunctionInfo* function) {
580     return function->unchecked_code() !=
581         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
582   }
583 
584   inline static bool IsFlushable(Heap* heap, JSFunction* function) {
585     SharedFunctionInfo* shared_info = function->unchecked_shared();
586 
587     // Code is either on stack, in compilation cache or referenced
588     // by optimized version of function.
589     if (function->unchecked_code()->IsMarked()) {
590       shared_info->set_code_age(0);
591       return false;
592     }
593 
594     // We do not flush code for optimized functions.
595     if (function->code() != shared_info->unchecked_code()) {
596       return false;
597     }
598 
599     return IsFlushable(heap, shared_info);
600   }
601 
602   inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
603     // Code is either on stack, in compilation cache or referenced
604     // by optimized version of function.
605     if (shared_info->unchecked_code()->IsMarked()) {
606       shared_info->set_code_age(0);
607       return false;
608     }
609 
610     // The function must be compiled and have the source code available,
611     // to be able to recompile it in case we need the function again.
612     if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
613       return false;
614     }
615 
616     // We never flush code for Api functions.
617     Object* function_data = shared_info->function_data();
618     if (function_data->IsHeapObject() &&
619         (SafeMap(function_data)->instance_type() ==
620          FUNCTION_TEMPLATE_INFO_TYPE)) {
621       return false;
622     }
623 
624     // Only flush code for functions.
625     if (shared_info->code()->kind() != Code::FUNCTION) return false;
626 
627     // Function must be lazy compilable.
628     if (!shared_info->allows_lazy_compilation()) return false;
629 
630     // If this is a full script wrapped in a function we do not flush the code.
631     if (shared_info->is_toplevel()) return false;
632 
633     // Age this shared function info.
634     if (shared_info->code_age() < kCodeAgeThreshold) {
635       shared_info->set_code_age(shared_info->code_age() + 1);
636       return false;
637     }
638 
639     return true;
640   }
641 
642 
643   static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
644     if (!IsFlushable(heap, function)) return false;
645 
646     // This function's code looks flushable. But we have to postpone the
647     // decision until we see all functions that point to the same
648     // SharedFunctionInfo because some of them might be optimized.
649     // That would make the nonoptimized version of the code nonflushable,
650     // because it is required for bailing out from optimized code.
651     heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
652     return true;
653   }
654 
655 
656   static inline Map* SafeMap(Object* obj) {
657     MapWord map_word = HeapObject::cast(obj)->map_word();
658     map_word.ClearMark();
659     map_word.ClearOverflow();
660     return map_word.ToMap();
661   }
662 
663 
664   static inline bool IsJSBuiltinsObject(Object* obj) {
665     return obj->IsHeapObject() &&
666         (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
667   }
668 
669 
670   static inline bool IsValidNotBuiltinContext(Object* ctx) {
671     if (!ctx->IsHeapObject()) return false;
672 
673     Map* map = SafeMap(ctx);
674     Heap* heap = map->heap();
675     if (!(map == heap->raw_unchecked_context_map() ||
676           map == heap->raw_unchecked_catch_context_map() ||
677           map == heap->raw_unchecked_global_context_map())) {
678       return false;
679     }
680 
681     Context* context = reinterpret_cast<Context*>(ctx);
682 
683     if (IsJSBuiltinsObject(context->global())) {
684       return false;
685     }
686 
687     return true;
688   }
689 
690 
691   static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
692     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
693 
694     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
695 
696     FixedBodyVisitor<StaticMarkingVisitor,
697                      SharedFunctionInfo::BodyDescriptor,
698                      void>::Visit(map, object);
699   }
700 
701 
702   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
703                                                   HeapObject* object) {
704     MarkCompactCollector* collector = map->heap()->mark_compact_collector();
705     if (!collector->is_code_flushing_enabled()) {
706       VisitSharedFunctionInfoGeneric(map, object);
707       return;
708     }
709     VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
710   }
711 
712 
713   static void VisitSharedFunctionInfoAndFlushCodeGeneric(
714       Map* map, HeapObject* object, bool known_flush_code_candidate) {
715     Heap* heap = map->heap();
716     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
717 
718     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
719 
720     if (!known_flush_code_candidate) {
721       known_flush_code_candidate = IsFlushable(heap, shared);
722       if (known_flush_code_candidate) {
723         heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
724       }
725     }
726 
727     VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
728   }
729 
730 
731   static void VisitCodeEntry(Heap* heap, Address entry_address) {
732     Object* code = Code::GetObjectFromEntryAddress(entry_address);
733     Object* old_code = code;
734     VisitPointer(heap, &code);
735     if (code != old_code) {
736       Memory::Address_at(entry_address) =
737           reinterpret_cast<Code*>(code)->entry();
738     }
739   }
740 
741 
742   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
743     Heap* heap = map->heap();
744     MarkCompactCollector* collector = heap->mark_compact_collector();
745     if (!collector->is_code_flushing_enabled()) {
746       VisitJSFunction(map, object);
747       return;
748     }
749 
750     JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
751     // The function must have a valid context and not be a builtin.
752     bool flush_code_candidate = false;
753     if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
754       flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
755     }
756 
757     if (!flush_code_candidate) {
758       collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
759 
760       if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
761         // For optimized functions we should retain both the non-optimized version
762         // of its code and the non-optimized versions of all inlined functions.
763         // This is required to support bailing out from inlined code.
764         DeoptimizationInputData* data =
765             reinterpret_cast<DeoptimizationInputData*>(
766                 jsfunction->unchecked_code()->unchecked_deoptimization_data());
767 
768         FixedArray* literals = data->UncheckedLiteralArray();
769 
770         for (int i = 0, count = data->InlinedFunctionCount()->value();
771              i < count;
772              i++) {
773           JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
774           collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
775         }
776       }
777     }
778 
779     VisitJSFunctionFields(map,
780                           reinterpret_cast<JSFunction*>(object),
781                           flush_code_candidate);
782   }
783 
784 
785   static void VisitJSFunction(Map* map, HeapObject* object) {
786     VisitJSFunctionFields(map,
787                           reinterpret_cast<JSFunction*>(object),
788                           false);
789   }
790 
791 
792 #define SLOT_ADDR(obj, offset) \
793   reinterpret_cast<Object**>((obj)->address() + offset)
794 
795 
796   static inline void VisitJSFunctionFields(Map* map,
797                                            JSFunction* object,
798                                            bool flush_code_candidate) {
799     Heap* heap = map->heap();
800     MarkCompactCollector* collector = heap->mark_compact_collector();
801 
802     VisitPointers(heap,
803                   SLOT_ADDR(object, JSFunction::kPropertiesOffset),
804                   SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
805 
806     if (!flush_code_candidate) {
807       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
808     } else {
809       // Don't visit code object.
810 
811       // Visit the shared function info to avoid double checking of its
812       // flushability.
813       SharedFunctionInfo* shared_info = object->unchecked_shared();
814       if (!shared_info->IsMarked()) {
815         Map* shared_info_map = shared_info->map();
816         collector->SetMark(shared_info);
817         collector->MarkObject(shared_info_map);
818         VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
819                                                    shared_info,
820                                                    true);
821       }
822     }
823 
824     VisitPointers(heap,
825                   SLOT_ADDR(object,
826                             JSFunction::kCodeEntryOffset + kPointerSize),
827                   SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
828 
829     // Don't visit the next function list field as it is a weak reference.
830   }
831 
832 
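  // Visits the fields of a SharedFunctionInfo. For flush candidates the code
  // slot is intentionally skipped; CodeFlusher::ProcessCandidates() decides
  // later whether to keep the code or reset it to the lazy-compile stub.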
833   static void VisitSharedFunctionInfoFields(Heap* heap,
834                                             HeapObject* object,
835                                             bool flush_code_candidate) {
836     VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
837 
838     if (!flush_code_candidate) {
839       VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
840     }
841 
842     VisitPointers(heap,
843                   SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
844                   SLOT_ADDR(object, SharedFunctionInfo::kSize));
845   }
846 
847   #undef SLOT_ADDR
848 
849   typedef void (*Callback)(Map* map, HeapObject* object);
850 
851   static VisitorDispatchTable<Callback> table_;
852 };
853 
854 
855 VisitorDispatchTable<StaticMarkingVisitor::Callback>
856   StaticMarkingVisitor::table_;
857 
858 
859 class MarkingVisitor : public ObjectVisitor {
860  public:
861   explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
862 
863   void VisitPointer(Object** p) {
864     StaticMarkingVisitor::VisitPointer(heap_, p);
865   }
866 
867   void VisitPointers(Object** start, Object** end) {
868     StaticMarkingVisitor::VisitPointers(heap_, start, end);
869   }
870 
871   void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
872     StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
873   }
874 
875   void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
876     StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
877   }
878 
879   void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
880     StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
881   }
882 
883  private:
884   Heap* heap_;
885 };
886 
887 
888 class CodeMarkingVisitor : public ThreadVisitor {
889  public:
890   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
891       : collector_(collector) {}
892 
893   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
894     for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
895       collector_->MarkObject(it.frame()->unchecked_code());
896     }
897   }
898 
899  private:
900   MarkCompactCollector* collector_;
901 };
902 
903 
904 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
905  public:
906   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
907       : collector_(collector) {}
908 
909   void VisitPointers(Object** start, Object** end) {
910     for (Object** p = start; p < end; p++) VisitPointer(p);
911   }
912 
913   void VisitPointer(Object** slot) {
914     Object* obj = *slot;
915     if (obj->IsSharedFunctionInfo()) {
916       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
917       collector_->MarkObject(shared->unchecked_code());
918       collector_->MarkObject(shared);
919     }
920   }
921 
922  private:
923   MarkCompactCollector* collector_;
924 };
925 
926 
927 void MarkCompactCollector::PrepareForCodeFlushing() {
928   ASSERT(heap() == Isolate::Current()->heap());
929 
930   if (!FLAG_flush_code) {
931     EnableCodeFlushing(false);
932     return;
933   }
934 
935 #ifdef ENABLE_DEBUGGER_SUPPORT
936   if (heap()->isolate()->debug()->IsLoaded() ||
937       heap()->isolate()->debug()->has_break_points()) {
938     EnableCodeFlushing(false);
939     return;
940   }
941 #endif
942   EnableCodeFlushing(true);
943 
944   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
945   // relies on it being marked before any other descriptor array.
946   MarkObject(heap()->raw_unchecked_empty_descriptor_array());
947 
948   // Make sure we are not referencing the code from the stack.
949   ASSERT(this == heap()->mark_compact_collector());
950   for (StackFrameIterator it; !it.done(); it.Advance()) {
951     MarkObject(it.frame()->unchecked_code());
952   }
953 
954   // Iterate the archived stacks in all threads to check if
955   // the code is referenced.
956   CodeMarkingVisitor code_marking_visitor(this);
957   heap()->isolate()->thread_manager()->IterateArchivedThreads(
958       &code_marking_visitor);
959 
960   SharedFunctionInfoMarkingVisitor visitor(this);
961   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
962   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
963 
964   ProcessMarkingStack();
965 }
966 
967 
968 // Visitor class for marking heap roots.
969 class RootMarkingVisitor : public ObjectVisitor {
970  public:
971   explicit RootMarkingVisitor(Heap* heap)
972     : collector_(heap->mark_compact_collector()) { }
973 
974   void VisitPointer(Object** p) {
975     MarkObjectByPointer(p);
976   }
977 
978   void VisitPointers(Object** start, Object** end) {
979     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
980   }
981 
982  private:
983   void MarkObjectByPointer(Object** p) {
984     if (!(*p)->IsHeapObject()) return;
985 
986     // Replace flat cons strings in place.
987     HeapObject* object = ShortCircuitConsString(p);
988     if (object->IsMarked()) return;
989 
990     Map* map = object->map();
991     // Mark the object.
992     collector_->SetMark(object);
993 
994     // Mark the map pointer and body, and push them on the marking stack.
995     collector_->MarkObject(map);
996     StaticMarkingVisitor::IterateBody(map, object);
997 
998     // Mark all the objects reachable from the map and body.  May leave
999     // overflowed objects in the heap.
1000     collector_->EmptyMarkingStack();
1001   }
1002 
1003   MarkCompactCollector* collector_;
1004 };
1005 
1006 
1007 // Helper class for pruning the symbol table.
1008 class SymbolTableCleaner : public ObjectVisitor {
1009  public:
1010   explicit SymbolTableCleaner(Heap* heap)
1011     : heap_(heap), pointers_removed_(0) { }
1012 
1013   virtual void VisitPointers(Object** start, Object** end) {
1014     // Visit all HeapObject pointers in [start, end).
1015     for (Object** p = start; p < end; p++) {
1016       if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
1017         // Check if the symbol being pruned is an external symbol. We need to
1018         // delete the associated external data as this symbol is going away.
1019 
1020         // Since no objects have yet been moved we can safely access the map of
1021         // the object.
1022         if ((*p)->IsExternalString()) {
1023           heap_->FinalizeExternalString(String::cast(*p));
1024         }
1025         // Set the entry to null_value (as deleted).
1026         *p = heap_->raw_unchecked_null_value();
1027         pointers_removed_++;
1028       }
1029     }
1030   }
1031 
1032   int PointersRemoved() {
1033     return pointers_removed_;
1034   }
1035  private:
1036   Heap* heap_;
1037   int pointers_removed_;
1038 };
1039 
1040 
1041 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1042 // are retained.
1043 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1044  public:
1045   virtual Object* RetainAs(Object* object) {
1046     MapWord first_word = HeapObject::cast(object)->map_word();
1047     if (first_word.IsMarked()) {
1048       return object;
1049     } else {
1050       return NULL;
1051     }
1052   }
1053 };
1054 
1055 
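// When map collection is enabled, maps of JS objects and functions are marked
// via MarkMapContents so that their descriptor and prototype transitions are
// treated as weak references; all other objects are simply marked and pushed
// onto the marking stack.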
1056 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
1057   ASSERT(!object->IsMarked());
1058   ASSERT(HEAP->Contains(object));
1059   if (object->IsMap()) {
1060     Map* map = Map::cast(object);
1061     if (FLAG_cleanup_caches_in_maps_at_gc) {
1062       map->ClearCodeCache(heap());
1063     }
1064     SetMark(map);
1065     if (FLAG_collect_maps &&
1066         map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
1067         map->instance_type() <= JS_FUNCTION_TYPE) {
1068       MarkMapContents(map);
1069     } else {
1070       marking_stack_.Push(map);
1071     }
1072   } else {
1073     SetMark(object);
1074     marking_stack_.Push(object);
1075   }
1076 }
1077 
1078 
1079 void MarkCompactCollector::MarkMapContents(Map* map) {
1080   // Mark prototype transitions array but don't push it into marking stack.
1081   // This will make references from it weak. We will clean dead prototype
1082   // transitions in ClearNonLiveTransitions.
1083   FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
1084   if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
1085 
1086   MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
1087       *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
1088 
1089   // Mark the Object* fields of the Map.
1090   // Since the descriptor array has been marked already, it is fine
1091   // that one of these fields contains a pointer to it.
1092   Object** start_slot = HeapObject::RawField(map,
1093                                              Map::kPointerFieldsBeginOffset);
1094 
1095   Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1096 
1097   StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
1098 }
1099 
1100 
1101 void MarkCompactCollector::MarkDescriptorArray(
1102     DescriptorArray* descriptors) {
1103   if (descriptors->IsMarked()) return;
1104   // Empty descriptor array is marked as a root before any maps are marked.
1105   ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
1106   SetMark(descriptors);
1107 
1108   FixedArray* contents = reinterpret_cast<FixedArray*>(
1109       descriptors->get(DescriptorArray::kContentArrayIndex));
1110   ASSERT(contents->IsHeapObject());
1111   ASSERT(!contents->IsMarked());
1112   ASSERT(contents->IsFixedArray());
1113   ASSERT(contents->length() >= 2);
1114   SetMark(contents);
1115   // Contents contains (value, details) pairs.  If the details say that the type
1116   // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1117   // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1118   // live.  Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1119   // CONSTANT_TRANSITION is the value an Object* (a Map*).
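  // Leaving transition targets unmarked here makes map-to-map transition
  // links weak: targets kept alive only by a transition die, and the stale
  // entries are pruned afterwards in ClearNonLiveTransitions.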
1120   for (int i = 0; i < contents->length(); i += 2) {
1121     // If the pair (value, details) at index i, i+1 is not
1122     // a transition or null descriptor, mark the value.
1123     PropertyDetails details(Smi::cast(contents->get(i + 1)));
1124     if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
1125       HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
1126       if (object->IsHeapObject() && !object->IsMarked()) {
1127         SetMark(object);
1128         marking_stack_.Push(object);
1129       }
1130     }
1131   }
1132   // The DescriptorArray descriptors contains a pointer to its contents array,
1133   // but the contents array is already marked.
1134   marking_stack_.Push(descriptors);
1135 }
1136 
1137 
1138 void MarkCompactCollector::CreateBackPointers() {
1139   HeapObjectIterator iterator(heap()->map_space());
1140   for (HeapObject* next_object = iterator.next();
1141        next_object != NULL; next_object = iterator.next()) {
1142     if (next_object->IsMap()) {  // Could also be ByteArray on free list.
1143       Map* map = Map::cast(next_object);
1144       if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
1145           map->instance_type() <= JS_FUNCTION_TYPE) {
1146         map->CreateBackPointers();
1147       } else {
1148         ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
1149       }
1150     }
1151   }
1152 }
1153 
1154 
1155 static int OverflowObjectSize(HeapObject* obj) {
1156   // Recover the normal map pointer, it might be marked as live and
1157   // overflowed.
1158   MapWord map_word = obj->map_word();
1159   map_word.ClearMark();
1160   map_word.ClearOverflow();
1161   return obj->SizeFromMap(map_word.ToMap());
1162 }
1163 
1164 
1165 class OverflowedObjectsScanner : public AllStatic {
1166  public:
1167   // Fill the marking stack with overflowed objects returned by the given
1168   // iterator.  Stop when the marking stack is filled or the end of the space
1169   // is reached, whichever comes first.
1170   template<class T>
1171   static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
1172                                            T* it) {
1173     // The caller should ensure that the marking stack is initially not full,
1174     // so that we don't waste effort pointlessly scanning for objects.
1175     ASSERT(!collector->marking_stack_.is_full());
1176 
1177     for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
1178       if (object->IsOverflowed()) {
1179         object->ClearOverflow();
1180         ASSERT(object->IsMarked());
1181         ASSERT(HEAP->Contains(object));
1182         collector->marking_stack_.Push(object);
1183         if (collector->marking_stack_.is_full()) return;
1184       }
1185     }
1186   }
1187 };
1188 
1189 
1190 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1191   return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
1192 }
1193 
1194 
1195 void MarkCompactCollector::MarkSymbolTable() {
1196   SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
1197   // Mark the symbol table itself.
1198   SetMark(symbol_table);
1199   // Explicitly mark the prefix.
1200   MarkingVisitor marker(heap());
1201   symbol_table->IteratePrefix(&marker);
1202   ProcessMarkingStack();
1203 }
1204 
1205 
1206 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1207   // Mark the heap roots including global variables, stack variables,
1208   // etc., and all objects reachable from them.
1209   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1210 
1211   // Handle the symbol table specially.
1212   MarkSymbolTable();
1213 
1214   // There may be overflowed objects in the heap.  Visit them now.
1215   while (marking_stack_.overflowed()) {
1216     RefillMarkingStack();
1217     EmptyMarkingStack();
1218   }
1219 }
1220 
1221 
1222 void MarkCompactCollector::MarkObjectGroups() {
1223   List<ObjectGroup*>* object_groups =
1224       heap()->isolate()->global_handles()->object_groups();
1225 
1226   int last = 0;
1227   for (int i = 0; i < object_groups->length(); i++) {
1228     ObjectGroup* entry = object_groups->at(i);
1229     ASSERT(entry != NULL);
1230 
1231     Object*** objects = entry->objects_;
1232     bool group_marked = false;
1233     for (size_t j = 0; j < entry->length_; j++) {
1234       Object* object = *objects[j];
1235       if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
1236         group_marked = true;
1237         break;
1238       }
1239     }
1240 
1241     if (!group_marked) {
1242       (*object_groups)[last++] = entry;
1243       continue;
1244     }
1245 
1246     // An object in the group is marked, so mark all heap objects in
1247     // the group.
1248     for (size_t j = 0; j < entry->length_; ++j) {
1249       if ((*objects[j])->IsHeapObject()) {
1250         MarkObject(HeapObject::cast(*objects[j]));
1251       }
1252     }
1253 
1254     // Once the entire group has been marked, dispose it because it's
1255     // not needed anymore.
1256     entry->Dispose();
1257   }
1258   object_groups->Rewind(last);
1259 }
1260 
1261 
1262 void MarkCompactCollector::MarkImplicitRefGroups() {
1263   List<ImplicitRefGroup*>* ref_groups =
1264       heap()->isolate()->global_handles()->implicit_ref_groups();
1265 
1266   int last = 0;
1267   for (int i = 0; i < ref_groups->length(); i++) {
1268     ImplicitRefGroup* entry = ref_groups->at(i);
1269     ASSERT(entry != NULL);
1270 
1271     if (!(*entry->parent_)->IsMarked()) {
1272       (*ref_groups)[last++] = entry;
1273       continue;
1274     }
1275 
1276     Object*** children = entry->children_;
1277     // A parent object is marked, so mark all child heap objects.
1278     for (size_t j = 0; j < entry->length_; ++j) {
1279       if ((*children[j])->IsHeapObject()) {
1280         MarkObject(HeapObject::cast(*children[j]));
1281       }
1282     }
1283 
1284     // Once the entire group has been marked, dispose it because it's
1285     // not needed anymore.
1286     entry->Dispose();
1287   }
1288   ref_groups->Rewind(last);
1289 }
1290 
1291 
1292 // Mark all objects reachable from the objects on the marking stack.
1293 // Before: the marking stack contains zero or more heap object pointers.
1294 // After: the marking stack is empty, and all objects reachable from the
1295 // marking stack have been marked, or are overflowed in the heap.
1296 void MarkCompactCollector::EmptyMarkingStack() {
1297   while (!marking_stack_.is_empty()) {
1298     HeapObject* object = marking_stack_.Pop();
1299     ASSERT(object->IsHeapObject());
1300     ASSERT(heap()->Contains(object));
1301     ASSERT(object->IsMarked());
1302     ASSERT(!object->IsOverflowed());
1303 
1304     // Because the object is marked, we have to recover the original map
1305     // pointer and use it to mark the object's body.
1306     MapWord map_word = object->map_word();
1307     map_word.ClearMark();
1308     Map* map = map_word.ToMap();
1309     MarkObject(map);
1310 
1311     StaticMarkingVisitor::IterateBody(map, object);
1312   }
1313 }
1314 
1315 
1316 // Sweep the heap for overflowed objects, clear their overflow bits, and
1317 // push them on the marking stack.  Stop early if the marking stack fills
1318 // before sweeping completes.  If sweeping completes, there are no remaining
1319 // overflowed objects in the heap so the overflow flag on the marking stack
1320 // is cleared.
1321 void MarkCompactCollector::RefillMarkingStack() {
1322   ASSERT(marking_stack_.overflowed());
1323 
1324   SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
1325   OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
1326   if (marking_stack_.is_full()) return;
1327 
1328   HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
1329                                     &OverflowObjectSize);
1330   OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
1331   if (marking_stack_.is_full()) return;
1332 
1333   HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
1334   OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
1335   if (marking_stack_.is_full()) return;
1336 
1337   HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
1338   OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
1339   if (marking_stack_.is_full()) return;
1340 
1341   HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
1342   OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
1343   if (marking_stack_.is_full()) return;
1344 
1345   HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
1346   OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
1347   if (marking_stack_.is_full()) return;
1348 
1349   LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
1350   OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
1351   if (marking_stack_.is_full()) return;
1352 
1353   marking_stack_.clear_overflowed();
1354 }
1355 
1356 
1357 // Mark all objects reachable (transitively) from objects on the marking
1358 // stack.  Before: the marking stack contains zero or more heap object
1359 // pointers.  After: the marking stack is empty and there are no overflowed
1360 // objects in the heap.
1361 void MarkCompactCollector::ProcessMarkingStack() {
1362   EmptyMarkingStack();
1363   while (marking_stack_.overflowed()) {
1364     RefillMarkingStack();
1365     EmptyMarkingStack();
1366   }
1367 }
1368 
1369 
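// Alternate between marking object groups / implicit ref groups and draining
// the marking stack until a fixed point: marking one group can make another
// group's parent reachable, so a single pass is not enough.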
1370 void MarkCompactCollector::ProcessExternalMarking() {
1371   bool work_to_do = true;
1372   ASSERT(marking_stack_.is_empty());
1373   while (work_to_do) {
1374     MarkObjectGroups();
1375     MarkImplicitRefGroups();
1376     work_to_do = !marking_stack_.is_empty();
1377     ProcessMarkingStack();
1378   }
1379 }
1380 
1381 
1382 void MarkCompactCollector::MarkLiveObjects() {
1383   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
1384   // The recursive GC marker detects when it is nearing stack overflow,
1385   // and switches to a different marking system.  JS interrupts interfere
1386   // with the C stack limit check.
1387   PostponeInterruptsScope postpone(heap()->isolate());
1388 
1389 #ifdef DEBUG
1390   ASSERT(state_ == PREPARE_GC);
1391   state_ = MARK_LIVE_OBJECTS;
1392 #endif
1393   // The to space contains live objects, the from space is used as a marking
1394   // stack.
1395   marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
1396                             heap()->new_space()->FromSpaceHigh());
1397 
1398   ASSERT(!marking_stack_.overflowed());
1399 
1400   PrepareForCodeFlushing();
1401 
1402   RootMarkingVisitor root_visitor(heap());
1403   MarkRoots(&root_visitor);
1404 
1405   // The objects reachable from the roots are marked, yet unreachable
1406   // objects are unmarked.  Mark objects reachable due to host
1407   // application specific logic.
1408   ProcessExternalMarking();
1409 
1410   // The objects reachable from the roots or object groups are marked,
1411   // yet unreachable objects are unmarked.  Mark objects reachable
1412   // only from weak global handles.
1413   //
1414   // First we identify nonlive weak handles and mark them as pending
1415   // destruction.
1416   heap()->isolate()->global_handles()->IdentifyWeakHandles(
1417       &IsUnmarkedHeapObject);
1418   // Then we mark the objects and process the transitive closure.
1419   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
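  // Drain any marking-stack overflow caused while marking the objects
  // reachable from the weak roots.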
1420   while (marking_stack_.overflowed()) {
1421     RefillMarkingStack();
1422     EmptyMarkingStack();
1423   }
1424 
1425   // Repeat host application specific marking to mark unmarked objects
1426   // reachable from the weak roots.
1427   ProcessExternalMarking();
1428 
1429   // Prune the symbol table removing all symbols only pointed to by the
1430   // symbol table.  Cannot use symbol_table() here because the symbol
1431   // table is marked.
1432   SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
1433   SymbolTableCleaner v(heap());
1434   symbol_table->IterateElements(&v);
1435   symbol_table->ElementsRemoved(v.PointersRemoved());
1436   heap()->external_string_table_.Iterate(&v);
1437   heap()->external_string_table_.CleanUp();
1438 
1439   // Process the weak references.
1440   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1441   heap()->ProcessWeakReferences(&mark_compact_object_retainer);
1442 
1443   // Remove object groups after marking phase.
1444   heap()->isolate()->global_handles()->RemoveObjectGroups();
1445   heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
1446 
1447   // Flush code from collected candidates.
1448   if (is_code_flushing_enabled()) {
1449     code_flusher_->ProcessCandidates();
1450   }
1451 
1452   // Clean up dead objects from the runtime profiler.
1453   heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
1454 }
1455 
1456 
1457 #ifdef DEBUG
1458 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1459   live_bytes_ += obj->Size();
1460   if (heap()->new_space()->Contains(obj)) {
1461     live_young_objects_size_ += obj->Size();
1462   } else if (heap()->map_space()->Contains(obj)) {
1463     ASSERT(obj->IsMap());
1464     live_map_objects_size_ += obj->Size();
1465   } else if (heap()->cell_space()->Contains(obj)) {
1466     ASSERT(obj->IsJSGlobalPropertyCell());
1467     live_cell_objects_size_ += obj->Size();
1468   } else if (heap()->old_pointer_space()->Contains(obj)) {
1469     live_old_pointer_objects_size_ += obj->Size();
1470   } else if (heap()->old_data_space()->Contains(obj)) {
1471     live_old_data_objects_size_ += obj->Size();
1472   } else if (heap()->code_space()->Contains(obj)) {
1473     live_code_objects_size_ += obj->Size();
1474   } else if (heap()->lo_space()->Contains(obj)) {
1475     live_lo_objects_size_ += obj->Size();
1476   } else {
1477     UNREACHABLE();
1478   }
1479 }
1480 #endif  // DEBUG
1481 
1482 
1483 void MarkCompactCollector::SweepLargeObjectSpace() {
1484 #ifdef DEBUG
1485   ASSERT(state_ == MARK_LIVE_OBJECTS);
1486   state_ =
1487       compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1488 #endif
1489   // Deallocate unmarked objects and clear marked bits for marked objects.
1490   heap()->lo_space()->FreeUnmarkedObjects();
1491 }
1492 
1493 
1494 // Safe to use during marking phase only.
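// The map word may still have its mark bit set, so the bit is cleared before
// the map pointer is decoded; this works on both marked and unmarked objects.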
1495 bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1496   MapWord metamap = object->map_word();
1497   metamap.ClearMark();
1498   return metamap.ToMap()->instance_type() == MAP_TYPE;
1499 }
1500 
1501 
1502 void MarkCompactCollector::ClearNonLiveTransitions() {
1503   HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
1504   // Iterate over the map space, setting map transitions that go from
1505   // a marked map to an unmarked map to null transitions.  At the same time,
1506   // set all the prototype fields of maps back to their original value,
1507   // dropping the back pointers temporarily stored in the prototype field.
1508   // Setting the prototype field requires following the linked list of
1509   // back pointers, reversing them all at once.  This allows us to find
1510   // those maps with map transitions that need to be nulled, and only
1511   // scan the descriptor arrays of those maps, not all maps.
1512   // All of these actions are carried out only on maps of JSObjects
1513   // and related subtypes.
1514   for (HeapObject* obj = map_iterator.next();
1515        obj != NULL; obj = map_iterator.next()) {
1516     Map* map = reinterpret_cast<Map*>(obj);
1517     if (!map->IsMarked() && map->IsByteArray()) continue;
1518 
1519     ASSERT(SafeIsMap(map));
1520     // Only JSObject and subtypes have map transitions and back pointers.
1521     if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1522     if (map->instance_type() > JS_FUNCTION_TYPE) continue;
1523 
1524     if (map->IsMarked() && map->attached_to_shared_function_info()) {
1525       // This map is used for inobject slack tracking and has been detached
1526       // from SharedFunctionInfo during the mark phase.
1527       // Since it survived the GC, reattach it now.
1528       map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1529     }
1530 
1531     // Clear dead prototype transitions.
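    // Layout of the prototype transitions array: element 0 holds the finger
    // (the number of elements in use); (prototype, cached map) pairs follow.
    // Live pairs are compacted towards the front and the finger is updated.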
1532     FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
1533     if (prototype_transitions->length() > 0) {
1534       int finger = Smi::cast(prototype_transitions->get(0))->value();
1535       int new_finger = 1;
1536       for (int i = 1; i < finger; i += 2) {
1537         Object* prototype = prototype_transitions->get(i);
1538         Object* cached_map = prototype_transitions->get(i + 1);
1539         if (HeapObject::cast(prototype)->IsMarked() &&
1540             HeapObject::cast(cached_map)->IsMarked()) {
1541           if (new_finger != i) {
1542             prototype_transitions->set_unchecked(heap_,
1543                                                  new_finger,
1544                                                  prototype,
1545                                                  UPDATE_WRITE_BARRIER);
1546             prototype_transitions->set_unchecked(heap_,
1547                                                  new_finger + 1,
1548                                                  cached_map,
1549                                                  SKIP_WRITE_BARRIER);
1550           }
1551           new_finger += 2;
1552         }
1553       }
1554 
1555       // Fill slots that became free with undefined value.
1556       Object* undefined = heap()->raw_unchecked_undefined_value();
1557       for (int i = new_finger; i < finger; i++) {
1558         prototype_transitions->set_unchecked(heap_,
1559                                              i,
1560                                              undefined,
1561                                              SKIP_WRITE_BARRIER);
1562       }
1563       prototype_transitions->set_unchecked(0, Smi::FromInt(new_finger));
1564     }
1565 
1566     // Follow the chain of back pointers to find the prototype.
1567     Map* current = map;
1568     while (SafeIsMap(current)) {
1569       current = reinterpret_cast<Map*>(current->prototype());
1570       ASSERT(current->IsHeapObject());
1571     }
1572     Object* real_prototype = current;
1573 
1574     // Follow back pointers, setting them to prototype,
1575     // clearing map transitions when necessary.
1576     current = map;
1577     bool on_dead_path = !current->IsMarked();
1578     Object* next;
1579     while (SafeIsMap(current)) {
1580       next = current->prototype();
1581       // There should never be a dead map above a live map.
1582       ASSERT(on_dead_path || current->IsMarked());
1583 
1584       // A live map above a dead map indicates a dead transition.
1585       // This test will always be false on the first iteration.
1586       if (on_dead_path && current->IsMarked()) {
1587         on_dead_path = false;
1588         current->ClearNonLiveTransitions(heap(), real_prototype);
1589       }
1590       *HeapObject::RawField(current, Map::kPrototypeOffset) =
1591           real_prototype;
1592       current = reinterpret_cast<Map*>(next);
1593     }
1594   }
1595 }
1596 
1597 // -------------------------------------------------------------------------
1598 // Phase 2: Encode forwarding addresses.
1599 // When compacting, forwarding addresses for objects in old space and map
1600 // space are encoded in their map pointer word (along with an encoding of
1601 // their map pointers).
1602 //
1603 // The exact encoding is described in the comments for class MapWord in
1604 // objects.h.
1605 //
1606 // An address range [start, end) can have both live and non-live objects.
1607 // Maximal non-live regions are marked so they can be skipped on subsequent
1608 // sweeps of the heap.  A distinguished map-pointer encoding is used to mark
1609 // free regions of one-word size (in which case the next word is the start
1610 // of a live object).  A second distinguished map-pointer encoding is used
1611 // to mark free regions larger than one word, and the size of the free
1612 // region (including the first word) is written to the second word of the
1613 // region.
1614 //
1615 // Any valid map page offset must lie in the object area of the page, so map
1616 // page offsets less than Page::kObjectStartOffset are invalid.  We use a
1617 // pair of distinguished invalid map encodings (for single word and multiple
1618 // words) to indicate free regions in the page found during computation of
1619 // forwarding addresses and skipped over in subsequent sweeps.
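//
// For example, EncodeFreeRegion (below) marks a 16-byte free region starting
// at addr by writing
//   Memory::uint32_at(addr)          = kMultiFreeEncoding;
//   Memory::int_at(addr + kIntSize)  = 16;
// while a one-word free region holds only kSingleFreeEncoding.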
1620 
1621 
1622 // Encode a free region, defined by the given start address and size, in the
1623 // first word or two of the region.
1624 void EncodeFreeRegion(Address free_start, int free_size) {
1625   ASSERT(free_size >= kIntSize);
1626   if (free_size == kIntSize) {
1627     Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
1628   } else {
1629     ASSERT(free_size >= 2 * kIntSize);
1630     Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
1631     Memory::int_at(free_start + kIntSize) = free_size;
1632   }
1633 
1634 #ifdef DEBUG
1635   // Zap the body of the free region.
1636   if (FLAG_enable_slow_asserts) {
1637     for (int offset = 2 * kIntSize;
1638          offset < free_size;
1639          offset += kPointerSize) {
1640       Memory::Address_at(free_start + offset) = kZapValue;
1641     }
1642   }
1643 #endif
1644 }
1645 
1646 
1647 // Try to promote all objects in new space: each object is allocated in the
1648 // old pointer or old data space, as selected by Heap::TargetSpace.  Objects
1649 // too large for a paged space are relocated within new space instead.
1650 inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
1651                                            HeapObject* object,
1652                                            int object_size) {
1653   MaybeObject* forwarded;
1654   if (object_size > heap->MaxObjectSizeInPagedSpace()) {
1655     forwarded = Failure::Exception();
1656   } else {
1657     OldSpace* target_space = heap->TargetSpace(object);
1658     ASSERT(target_space == heap->old_pointer_space() ||
1659            target_space == heap->old_data_space());
1660     forwarded = target_space->MCAllocateRaw(object_size);
1661   }
1662   Object* result;
1663   if (!forwarded->ToObject(&result)) {
1664     result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
1665   }
1666   return result;
1667 }
1668 
1669 
1670 // Allocation functions for the paged spaces call the space's MCAllocateRaw.
1671 MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
1672     Heap* heap,
1673     HeapObject* ignore,
1674     int object_size) {
1675   return heap->old_pointer_space()->MCAllocateRaw(object_size);
1676 }
1677 
1678 
1679 MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
1680     Heap* heap,
1681     HeapObject* ignore,
1682     int object_size) {
1683   return heap->old_data_space()->MCAllocateRaw(object_size);
1684 }
1685 
1686 
1687 MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
1688     Heap* heap,
1689     HeapObject* ignore,
1690     int object_size) {
1691   return heap->code_space()->MCAllocateRaw(object_size);
1692 }
1693 
1694 
1695 MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
1696     Heap* heap,
1697     HeapObject* ignore,
1698     int object_size) {
1699   return heap->map_space()->MCAllocateRaw(object_size);
1700 }
1701 
1702 
1703 MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
1704     Heap* heap, HeapObject* ignore, int object_size) {
1705   return heap->cell_space()->MCAllocateRaw(object_size);
1706 }
1707 
1708 
1709 // The forwarding address is encoded at the same offset as the current
1710 // to-space object, but in from space.
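// In effect the from space serves as an off-to-the-side forwarding table:
// the forwarding address of the to-space object at offset N from the start
// of to space is stored at FromSpaceLow() + N.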
1711 inline void EncodeForwardingAddressInNewSpace(Heap* heap,
1712                                               HeapObject* old_object,
1713                                               int object_size,
1714                                               Object* new_object,
1715                                               int* ignored) {
1716   int offset =
1717       heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
1718   Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
1719       HeapObject::cast(new_object)->address();
1720 }
1721 
1722 
1723 // The forwarding address is encoded in the map pointer of the object as an
1724 // offset (in terms of live bytes) from the address of the first live object
1725 // in the page.
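// The full address is reconstructed later (see GetForwardingAddressInOldSpace)
// from the page's mc_first_forwarded field, i.e. the forwarding address of the
// page's first live object, plus the offset stored in the map word.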
1726 inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
1727                                                 HeapObject* old_object,
1728                                                 int object_size,
1729                                                 Object* new_object,
1730                                                 int* offset) {
1731   // Record the forwarding address of the first live object if necessary.
1732   if (*offset == 0) {
1733     Page::FromAddress(old_object->address())->mc_first_forwarded =
1734         HeapObject::cast(new_object)->address();
1735   }
1736 
1737   MapWord encoding =
1738       MapWord::EncodeAddress(old_object->map()->address(), *offset);
1739   old_object->set_map_word(encoding);
1740   *offset += object_size;
1741   ASSERT(*offset <= Page::kObjectAreaSize);
1742 }
1743 
1744 
1745 // Most non-live objects are ignored.
1746 inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
1747 
1748 
1749 // Function template that, given a range of addresses (eg, a semispace or a
1750 // paged space page), iterates through the objects in the range to clear
1751 // mark bits and compute and encode forwarding addresses.  As a side effect,
1752 // maximal free chunks are marked so that they can be skipped on subsequent
1753 // sweeps.
1754 //
1755 // The template parameters are an allocation function, a forwarding address
1756 // encoding function, and a function to process non-live objects.
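// It is instantiated below with a per-space allocation function and either
// the new-space or the paged-space forwarding-address encoding function.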
1757 template<MarkCompactCollector::AllocationFunction Alloc,
1758          MarkCompactCollector::EncodingFunction Encode,
1759          MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1760 inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
1761                                              Address start,
1762                                              Address end,
1763                                              int* offset) {
1764   // The start address of the current free region while sweeping the space.
1765   // This address is set when a transition from live to non-live objects is
1766   // encountered.  A value (an encoding of the 'next free region' pointer)
1767   // is written to memory at this address when a transition from non-live to
1768   // live objects is encountered.
1769   Address free_start = NULL;
1770 
1771   // A flag giving the state of the previously swept object.  Initially true
1772   // to ensure that free_start is initialized to a proper address before
1773   // trying to write to it.
1774   bool is_prev_alive = true;
1775 
1776   int object_size;  // Will be set on each iteration of the loop.
1777   for (Address current = start; current < end; current += object_size) {
1778     HeapObject* object = HeapObject::FromAddress(current);
1779     if (object->IsMarked()) {
1780       object->ClearMark();
1781       collector->tracer()->decrement_marked_count();
1782       object_size = object->Size();
1783 
1784       Object* forwarded =
1785           Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
1786       Encode(collector->heap(), object, object_size, forwarded, offset);
1787 
1788 #ifdef DEBUG
1789       if (FLAG_gc_verbose) {
1790         PrintF("forward %p -> %p.\n", object->address(),
1791                HeapObject::cast(forwarded)->address());
1792       }
1793 #endif
1794       if (!is_prev_alive) {  // Transition from non-live to live.
1795         EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
1796         is_prev_alive = true;
1797       }
1798     } else {  // Non-live object.
1799       object_size = object->Size();
1800       ProcessNonLive(object, collector->heap()->isolate());
1801       if (is_prev_alive) {  // Transition from live to non-live.
1802         free_start = current;
1803         is_prev_alive = false;
1804       }
1805       LiveObjectList::ProcessNonLive(object);
1806     }
1807   }
1808 
1809   // If we ended on a free region, mark it.
1810   if (!is_prev_alive) {
1811     EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
1812   }
1813 }
1814 
1815 
1816 // Functions to encode the forwarding pointers in each compactable space.
1817 void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
1818   int ignored;
1819   EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
1820                                    EncodeForwardingAddressInNewSpace,
1821                                    IgnoreNonLiveObject>(
1822       this,
1823       heap()->new_space()->bottom(),
1824       heap()->new_space()->top(),
1825       &ignored);
1826 }
1827 
1828 
1829 template<MarkCompactCollector::AllocationFunction Alloc,
1830          MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1831 void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
1832     PagedSpace* space) {
1833   PageIterator it(space, PageIterator::PAGES_IN_USE);
1834   while (it.has_next()) {
1835     Page* p = it.next();
1836 
1837     // The offset of each live object in the page from the first live object
1838     // in the page.
1839     int offset = 0;
1840     EncodeForwardingAddressesInRange<Alloc,
1841                                      EncodeForwardingAddressInPagedSpace,
1842                                      ProcessNonLive>(
1843         this,
1844         p->ObjectAreaStart(),
1845         p->AllocationTop(),
1846         &offset);
1847   }
1848 }
1849 
1850 
1851 // We scavenge new space simultaneously with sweeping. This is done in two
1852 // passes.
1853 // The first pass migrates all live objects from one semispace to the other
1854 // or promotes them to old space. The forwarding address is written directly
1855 // into the first word of the object without any encoding; if the object is
1856 // dead, NULL is written as its forwarding address.
1857 // The second pass updates pointers to new space in all spaces. It is possible
1858 // to encounter pointers to dead objects during the traversal of dirty
1859 // regions; we clear them so that they are not visited again during the next
1860 // dirty-regions iteration.
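// MigrateObject copies the object's body to dst and then stores dst into the
// first word at src, turning the old copy into a forwarding record.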
1861 static void MigrateObject(Heap* heap,
1862                           Address dst,
1863                           Address src,
1864                           int size,
1865                           bool to_old_space) {
1866   if (to_old_space) {
1867     heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
1868   } else {
1869     heap->CopyBlock(dst, src, size);
1870   }
1871 
1872   Memory::Address_at(src) = dst;
1873 }
1874 
1875 
1876 class StaticPointersToNewGenUpdatingVisitor : public
1877   StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1878  public:
1879   static inline void VisitPointer(Heap* heap, Object** p) {
1880     if (!(*p)->IsHeapObject()) return;
1881 
1882     HeapObject* obj = HeapObject::cast(*p);
1883     Address old_addr = obj->address();
1884 
1885     if (heap->new_space()->Contains(obj)) {
1886       ASSERT(heap->InFromSpace(*p));
1887       *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1888     }
1889   }
1890 };
1891 
1892 
1893 // Visitor for updating pointers from live objects in old spaces to new space.
1894 // It does not expect to encounter pointers to dead objects.
1895 class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
1896  public:
1897   explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
1898 
1899   void VisitPointer(Object** p) {
1900     StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
1901   }
1902 
1903   void VisitPointers(Object** start, Object** end) {
1904     for (Object** p = start; p < end; p++) {
1905       StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
1906     }
1907   }
1908 
1909   void VisitCodeTarget(RelocInfo* rinfo) {
1910     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1911     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1912     VisitPointer(&target);
1913     rinfo->set_target_address(Code::cast(target)->instruction_start());
1914   }
1915 
1916   void VisitDebugTarget(RelocInfo* rinfo) {
1917     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1918             rinfo->IsPatchedReturnSequence()) ||
1919            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1920             rinfo->IsPatchedDebugBreakSlotSequence()));
1921     Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1922     VisitPointer(&target);
1923     rinfo->set_call_address(Code::cast(target)->instruction_start());
1924   }
1925  private:
1926   Heap* heap_;
1927 };
1928 
1929 
1930 // Visitor for updating pointers from live objects in old spaces to new space.
1931 // It can encounter pointers to dead objects in new space when traversing map
1932 // space (see comment for MigrateObject).
1933 static void UpdatePointerToNewGen(HeapObject** p) {
1934   if (!(*p)->IsHeapObject()) return;
1935 
1936   Address old_addr = (*p)->address();
1937   ASSERT(HEAP->InFromSpace(*p));
1938 
1939   Address new_addr = Memory::Address_at(old_addr);
1940 
1941   if (new_addr == NULL) {
1942     // We encountered pointer to a dead object. Clear it so we will
1943     // not visit it again during next iteration of dirty regions.
1944     *p = NULL;
1945   } else {
1946     *p = HeapObject::FromAddress(new_addr);
1947   }
1948 }
1949 
1950 
1951 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1952                                                                  Object** p) {
1953   Address old_addr = HeapObject::cast(*p)->address();
1954   Address new_addr = Memory::Address_at(old_addr);
1955   return String::cast(HeapObject::FromAddress(new_addr));
1956 }
1957 
1958 
1959 static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
1960   Object* result;
1961 
1962   if (object_size > heap->MaxObjectSizeInPagedSpace()) {
1963     MaybeObject* maybe_result =
1964         heap->lo_space()->AllocateRawFixedArray(object_size);
1965     if (maybe_result->ToObject(&result)) {
1966       HeapObject* target = HeapObject::cast(result);
1967       MigrateObject(heap, target->address(), object->address(), object_size,
1968                     true);
1969       heap->mark_compact_collector()->tracer()->
1970           increment_promoted_objects_size(object_size);
1971       return true;
1972     }
1973   } else {
1974     OldSpace* target_space = heap->TargetSpace(object);
1975 
1976     ASSERT(target_space == heap->old_pointer_space() ||
1977            target_space == heap->old_data_space());
1978     MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
1979     if (maybe_result->ToObject(&result)) {
1980       HeapObject* target = HeapObject::cast(result);
1981       MigrateObject(heap,
1982                     target->address(),
1983                     object->address(),
1984                     object_size,
1985                     target_space == heap->old_pointer_space());
1986       heap->mark_compact_collector()->tracer()->
1987           increment_promoted_objects_size(object_size);
1988       return true;
1989     }
1990   }
1991 
1992   return false;
1993 }
1994 
1995 
1996 static void SweepNewSpace(Heap* heap, NewSpace* space) {
1997   heap->CheckNewSpaceExpansionCriteria();
1998 
1999   Address from_bottom = space->bottom();
2000   Address from_top = space->top();
2001 
2002   // Flip the semispaces.  After flipping, to space is empty, from space has
2003   // live objects.
2004   space->Flip();
2005   space->ResetAllocationInfo();
2006 
2007   int size = 0;
2008   int survivors_size = 0;
2009 
2010   // First pass: traverse all objects in inactive semispace, remove marks,
2011   // migrate live objects and write forwarding addresses.
2012   for (Address current = from_bottom; current < from_top; current += size) {
2013     HeapObject* object = HeapObject::FromAddress(current);
2014 
2015     if (object->IsMarked()) {
2016       object->ClearMark();
2017       heap->mark_compact_collector()->tracer()->decrement_marked_count();
2018 
2019       size = object->Size();
2020       survivors_size += size;
2021 
2022       // Aggressively promote young survivors to the old space.
2023       if (TryPromoteObject(heap, object, size)) {
2024         continue;
2025       }
2026 
2027       // Promotion failed. Just migrate object to another semispace.
2028       // Allocation cannot fail at this point: semispaces are of equal size.
2029       Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
2030 
2031       MigrateObject(heap,
2032                     HeapObject::cast(target)->address(),
2033                     current,
2034                     size,
2035                     false);
2036     } else {
2037       // Process the dead object before we write a NULL into its header.
2038       LiveObjectList::ProcessNonLive(object);
2039 
2040       size = object->Size();
2041       Memory::Address_at(current) = NULL;
2042     }
2043   }
2044 
2045   // Second pass: find pointers to new space and update them.
2046   PointersToNewGenUpdatingVisitor updating_visitor(heap);
2047 
2048   // Update pointers in to space.
2049   Address current = space->bottom();
2050   while (current < space->top()) {
2051     HeapObject* object = HeapObject::FromAddress(current);
2052     current +=
2053         StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
2054                                                            object);
2055   }
2056 
2057   // Update roots.
2058   heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
2059   LiveObjectList::IterateElements(&updating_visitor);
2060 
2061   // Update pointers in old spaces.
2062   heap->IterateDirtyRegions(heap->old_pointer_space(),
2063                             &Heap::IteratePointersInDirtyRegion,
2064                             &UpdatePointerToNewGen,
2065                             heap->WATERMARK_SHOULD_BE_VALID);
2066 
2067   heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
2068 
2069   // Update pointers from cells.
2070   HeapObjectIterator cell_iterator(heap->cell_space());
2071   for (HeapObject* cell = cell_iterator.next();
2072        cell != NULL;
2073        cell = cell_iterator.next()) {
2074     if (cell->IsJSGlobalPropertyCell()) {
2075       Address value_address =
2076           reinterpret_cast<Address>(cell) +
2077           (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
2078       updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
2079     }
2080   }
2081 
2082   // Update pointer from the global contexts list.
2083   updating_visitor.VisitPointer(heap->global_contexts_list_address());
2084 
2085   // Update pointers from external string table.
2086   heap->UpdateNewSpaceReferencesInExternalStringTable(
2087       &UpdateNewSpaceReferenceInExternalStringTableEntry);
2088 
2089   // All pointers were updated. Update auxiliary allocation info.
2090   heap->IncrementYoungSurvivorsCounter(survivors_size);
2091   space->set_age_mark(space->top());
2092 
2093   // Update JSFunction pointers from the runtime profiler.
2094   heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
2095 }
2096 
2097 
2098 static void SweepSpace(Heap* heap, PagedSpace* space) {
2099   PageIterator it(space, PageIterator::PAGES_IN_USE);
2100 
2101 // During sweeping of a paged space we try to find the longest sequences
2102 // of pages without live objects and free them (instead of putting them on
2103 // the free list).
2104 
2105   // Page preceding current.
2106   Page* prev = Page::FromAddress(NULL);
2107 
2108   // First empty page in a sequence.
2109   Page* first_empty_page = Page::FromAddress(NULL);
2110 
2111   // Page preceding first empty page.
2112   Page* prec_first_empty_page = Page::FromAddress(NULL);
2113 
2114 // If the last used page of the space ends with a sequence of dead objects
2115 // we can adjust the allocation top instead of putting this free area into
2116 // the free list.  Thus during sweeping we keep track of such areas
2117 // and defer their deallocation until the sweeping of the next page
2118 // is done: if one of the next pages contains live objects we have
2119 // to put such an area into the free list.
2120   Address last_free_start = NULL;
2121   int last_free_size = 0;
2122 
2123   while (it.has_next()) {
2124     Page* p = it.next();
2125 
2126     bool is_previous_alive = true;
2127     Address free_start = NULL;
2128     HeapObject* object;
2129 
2130     for (Address current = p->ObjectAreaStart();
2131          current < p->AllocationTop();
2132          current += object->Size()) {
2133       object = HeapObject::FromAddress(current);
2134       if (object->IsMarked()) {
2135         object->ClearMark();
2136         heap->mark_compact_collector()->tracer()->decrement_marked_count();
2137 
2138         if (!is_previous_alive) {  // Transition from free to live.
2139           space->DeallocateBlock(free_start,
2140                                  static_cast<int>(current - free_start),
2141                                  true);
2142           is_previous_alive = true;
2143         }
2144       } else {
2145         heap->mark_compact_collector()->ReportDeleteIfNeeded(
2146             object, heap->isolate());
2147         if (is_previous_alive) {  // Transition from live to free.
2148           free_start = current;
2149           is_previous_alive = false;
2150         }
2151         LiveObjectList::ProcessNonLive(object);
2152       }
2153       // The object is now unmarked for the call to Size() at the top of the
2154       // loop.
2155     }
2156 
2157     bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
2158         || (!is_previous_alive && free_start == p->ObjectAreaStart());
2159 
2160     if (page_is_empty) {
2161       // This page is empty.  Check whether we are in the middle of a
2162       // sequence of empty pages and start one if not.
2163       if (!first_empty_page->is_valid()) {
2164         first_empty_page = p;
2165         prec_first_empty_page = prev;
2166       }
2167 
2168       if (!is_previous_alive) {
2169         // There are dead objects on this page. Update space accounting stats
2170         // without putting anything into the free list.
2171         int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2172         if (size_in_bytes > 0) {
2173           space->DeallocateBlock(free_start, size_in_bytes, false);
2174         }
2175       }
2176     } else {
2177       // This page is not empty. Sequence of empty pages ended on the previous
2178       // one.
2179       if (first_empty_page->is_valid()) {
2180         space->FreePages(prec_first_empty_page, prev);
2181         prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2182       }
2183 
2184       // If there is a free ending area on one of the previous pages we have to
2185       // deallocate that area and put it on the free list.
2186       if (last_free_size > 0) {
2187         Page::FromAddress(last_free_start)->
2188             SetAllocationWatermark(last_free_start);
2189         space->DeallocateBlock(last_free_start, last_free_size, true);
2190         last_free_start = NULL;
2191         last_free_size  = 0;
2192       }
2193 
2194       // If the last region of this page was not live we remember it.
2195       if (!is_previous_alive) {
2196         ASSERT(last_free_size == 0);
2197         last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2198         last_free_start = free_start;
2199       }
2200     }
2201 
2202     prev = p;
2203   }
2204 
2205   // We reached the end of the space. See if we need to adjust allocation top.
2206   Address new_allocation_top = NULL;
2207 
2208   if (first_empty_page->is_valid()) {
2209     // Last used pages in space are empty. We can move allocation top backwards
2210     // to the beginning of the first empty page.
2211     ASSERT(prev == space->AllocationTopPage());
2212 
2213     new_allocation_top = first_empty_page->ObjectAreaStart();
2214   }
2215 
2216   if (last_free_size > 0) {
2217     // There was a free ending area on the previous page.
2218     // Deallocate it without putting it into the free list and move allocation
2219     // top to the beginning of this free area.
2220     space->DeallocateBlock(last_free_start, last_free_size, false);
2221     new_allocation_top = last_free_start;
2222   }
2223 
2224   if (new_allocation_top != NULL) {
2225 #ifdef DEBUG
2226     Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2227     if (!first_empty_page->is_valid()) {
2228       ASSERT(new_allocation_top_page == space->AllocationTopPage());
2229     } else if (last_free_size > 0) {
2230       ASSERT(new_allocation_top_page == prec_first_empty_page);
2231     } else {
2232       ASSERT(new_allocation_top_page == first_empty_page);
2233     }
2234 #endif
2235 
2236     space->SetTop(new_allocation_top);
2237   }
2238 }
2239 
2240 
2241 void MarkCompactCollector::EncodeForwardingAddresses() {
2242   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2243   // Objects in the active semispace of the young generation may be
2244   // relocated to the inactive semispace (if not promoted).  Set the
2245   // relocation info to the beginning of the inactive semispace.
2246   heap()->new_space()->MCResetRelocationInfo();
2247 
2248   // Compute the forwarding pointers in each space.
2249   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
2250                                         ReportDeleteIfNeeded>(
2251       heap()->old_pointer_space());
2252 
2253   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
2254                                         IgnoreNonLiveObject>(
2255       heap()->old_data_space());
2256 
2257   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
2258                                         ReportDeleteIfNeeded>(
2259       heap()->code_space());
2260 
2261   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
2262                                         IgnoreNonLiveObject>(
2263       heap()->cell_space());
2264 
2265 
2266   // Compute new space next to last after the old and code spaces have been
2267   // compacted.  Objects in new space can be promoted to old or code space.
2268   EncodeForwardingAddressesInNewSpace();
2269 
2270   // Compute map space last because computing forwarding addresses
2271   // overwrites non-live objects.  Objects in the other spaces rely on
2272   // non-live map pointers to get the sizes of non-live objects.
2273   EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
2274                                         IgnoreNonLiveObject>(
2275       heap()->map_space());
2276 
2277   // Write relocation info to the top page, so we can use it later.  This is
2278   // done after promoting objects from the new space so we get the correct
2279   // allocation top.
2280   heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
2281   heap()->old_data_space()->MCWriteRelocationInfoToPage();
2282   heap()->code_space()->MCWriteRelocationInfoToPage();
2283   heap()->map_space()->MCWriteRelocationInfoToPage();
2284   heap()->cell_space()->MCWriteRelocationInfoToPage();
2285 }
2286 
2287 
2288 class MapIterator : public HeapObjectIterator {
2289  public:
2290   explicit MapIterator(Heap* heap)
2291     : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
2292 
2293   MapIterator(Heap* heap, Address start)
2294       : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
2295 
2296  private:
2297   static int SizeCallback(HeapObject* unused) {
2298     USE(unused);
2299     return Map::kSize;
2300   }
2301 };
2302 
2303 
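// Compacts the map space in place: live maps are copied into free slots at
// the front of the map space, each evacuated map is tagged with a forwarding
// map word, and all map pointers in the heap are then rewritten.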
2304 class MapCompact {
2305  public:
2306   explicit MapCompact(Heap* heap, int live_maps)
2307     : heap_(heap),
2308       live_maps_(live_maps),
2309       to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
2310       vacant_map_it_(heap),
2311       map_to_evacuate_it_(heap, to_evacuate_start_),
2312       first_map_to_evacuate_(
2313           reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
2314   }
2315 
2316   void CompactMaps() {
2317     // As we know the number of maps to evacuate beforehand,
2318     // we stop when there are no more vacant maps.
2319     for (Map* next_vacant_map = NextVacantMap();
2320          next_vacant_map;
2321          next_vacant_map = NextVacantMap()) {
2322       EvacuateMap(next_vacant_map, NextMapToEvacuate());
2323     }
2324 
2325 #ifdef DEBUG
2326     CheckNoMapsToEvacuate();
2327 #endif
2328   }
2329 
2330   void UpdateMapPointersInRoots() {
2331     MapUpdatingVisitor map_updating_visitor;
2332     heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
2333     heap()->isolate()->global_handles()->IterateWeakRoots(
2334         &map_updating_visitor);
2335     LiveObjectList::IterateElements(&map_updating_visitor);
2336   }
2337 
2338   void UpdateMapPointersInPagedSpace(PagedSpace* space) {
2339     ASSERT(space != heap()->map_space());
2340 
2341     PageIterator it(space, PageIterator::PAGES_IN_USE);
2342     while (it.has_next()) {
2343       Page* p = it.next();
2344       UpdateMapPointersInRange(heap(),
2345                                p->ObjectAreaStart(),
2346                                p->AllocationTop());
2347     }
2348   }
2349 
2350   void UpdateMapPointersInNewSpace() {
2351     NewSpace* space = heap()->new_space();
2352     UpdateMapPointersInRange(heap(), space->bottom(), space->top());
2353   }
2354 
2355   void UpdateMapPointersInLargeObjectSpace() {
2356     LargeObjectIterator it(heap()->lo_space());
2357     for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2358       UpdateMapPointersInObject(heap(), obj);
2359   }
2360 
2361   void Finish() {
2362     heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
2363   }
2364 
2365   inline Heap* heap() const { return heap_; }
2366 
2367  private:
2368   Heap* heap_;
2369   int live_maps_;
2370   Address to_evacuate_start_;
2371   MapIterator vacant_map_it_;
2372   MapIterator map_to_evacuate_it_;
2373   Map* first_map_to_evacuate_;
2374 
2375   // Helper class for updating map pointers in HeapObjects.
2376   class MapUpdatingVisitor: public ObjectVisitor {
2377   public:
2378     MapUpdatingVisitor() {}
2379 
2380     void VisitPointer(Object** p) {
2381       UpdateMapPointer(p);
2382     }
2383 
2384     void VisitPointers(Object** start, Object** end) {
2385       for (Object** p = start; p < end; p++) UpdateMapPointer(p);
2386     }
2387 
2388   private:
2389     void UpdateMapPointer(Object** p) {
2390       if (!(*p)->IsHeapObject()) return;
2391       HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
2392 
2393       // Moved maps are tagged with an overflowed map word.  They are the only
2394       // objects whose map word is overflowed, as marking is already complete.
2395       MapWord map_word = old_map->map_word();
2396       if (!map_word.IsOverflowed()) return;
2397 
2398       *p = GetForwardedMap(map_word);
2399     }
2400   };
2401 
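  // Returns the next object of the requested kind from the iterator (a map
  // if live is true, a free-list node otherwise), or NULL once 'last' is
  // reached.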
2402   static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
2403     while (true) {
2404       HeapObject* next = it->next();
2405       ASSERT(next != NULL);
2406       if (next == last)
2407         return NULL;
2408       ASSERT(!next->IsOverflowed());
2409       ASSERT(!next->IsMarked());
2410       ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
2411       if (next->IsMap() == live)
2412         return reinterpret_cast<Map*>(next);
2413     }
2414   }
2415 
2416   Map* NextVacantMap() {
2417     Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
2418     ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
2419     return map;
2420   }
2421 
2422   Map* NextMapToEvacuate() {
2423     Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
2424     ASSERT(map != NULL);
2425     ASSERT(map->IsMap());
2426     return map;
2427   }
2428 
2429   static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
2430     ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2431     ASSERT(map_to_evacuate->IsMap());
2432 
2433     ASSERT(Map::kSize % 4 == 0);
2434 
2435     map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
2436         vacant_map->address(), map_to_evacuate->address(), Map::kSize);
2437 
2438     ASSERT(vacant_map->IsMap());  // Due to memcpy above.
2439 
2440     MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2441     forwarding_map_word.SetOverflow();
2442     map_to_evacuate->set_map_word(forwarding_map_word);
2443 
2444     ASSERT(map_to_evacuate->map_word().IsOverflowed());
2445     ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2446   }
2447 
2448   static Map* GetForwardedMap(MapWord map_word) {
2449     ASSERT(map_word.IsOverflowed());
2450     map_word.ClearOverflow();
2451     Map* new_map = map_word.ToMap();
2452     ASSERT_MAP_ALIGNED(new_map->address());
2453     return new_map;
2454   }
2455 
2456   static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
2457     ASSERT(!obj->IsMarked());
2458     Map* map = obj->map();
2459     ASSERT(heap->map_space()->Contains(map));
2460     MapWord map_word = map->map_word();
2461     ASSERT(!map_word.IsMarked());
2462     if (map_word.IsOverflowed()) {
2463       Map* new_map = GetForwardedMap(map_word);
2464       ASSERT(heap->map_space()->Contains(new_map));
2465       obj->set_map(new_map);
2466 
2467 #ifdef DEBUG
2468       if (FLAG_gc_verbose) {
2469         PrintF("update %p : %p -> %p\n",
2470                obj->address(),
2471                reinterpret_cast<void*>(map),
2472                reinterpret_cast<void*>(new_map));
2473       }
2474 #endif
2475     }
2476 
2477     int size = obj->SizeFromMap(map);
2478     MapUpdatingVisitor map_updating_visitor;
2479     obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
2480     return size;
2481   }
2482 
2483   static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
2484     HeapObject* object;
2485     int size;
2486     for (Address current = start; current < end; current += size) {
2487       object = HeapObject::FromAddress(current);
2488       size = UpdateMapPointersInObject(heap, object);
2489       ASSERT(size > 0);
2490     }
2491   }
2492 
2493 #ifdef DEBUG
2494   void CheckNoMapsToEvacuate() {
2495     if (!FLAG_enable_slow_asserts)
2496       return;
2497 
2498     for (HeapObject* obj = map_to_evacuate_it_.next();
2499          obj != NULL; obj = map_to_evacuate_it_.next())
2500       ASSERT(FreeListNode::IsFreeListNode(obj));
2501   }
2502 #endif
2503 };
2504 
2505 
2506 void MarkCompactCollector::SweepSpaces() {
2507   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2508 
2509   ASSERT(state_ == SWEEP_SPACES);
2510   ASSERT(!IsCompacting());
2511   // Noncompacting collections simply sweep the spaces to clear the mark
2512   // bits and free the nonlive blocks (for old and map spaces).  We sweep
2513   // the map space last because freeing non-live maps overwrites them and
2514   // the other spaces rely on possibly non-live maps to get the sizes for
2515   // non-live objects.
2516   SweepSpace(heap(), heap()->old_pointer_space());
2517   SweepSpace(heap(), heap()->old_data_space());
2518   SweepSpace(heap(), heap()->code_space());
2519   SweepSpace(heap(), heap()->cell_space());
2520   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2521     SweepNewSpace(heap(), heap()->new_space());
2522   }
2523   SweepSpace(heap(), heap()->map_space());
2524 
2525   heap()->IterateDirtyRegions(heap()->map_space(),
2526                              &heap()->IteratePointersInDirtyMapsRegion,
2527                              &UpdatePointerToNewGen,
2528                              heap()->WATERMARK_SHOULD_BE_VALID);
2529 
2530   intptr_t live_maps_size = heap()->map_space()->Size();
2531   int live_maps = static_cast<int>(live_maps_size / Map::kSize);
2532   ASSERT(live_map_objects_size_ == live_maps_size);
2533 
2534   if (heap()->map_space()->NeedsCompaction(live_maps)) {
2535     MapCompact map_compact(heap(), live_maps);
2536 
2537     map_compact.CompactMaps();
2538     map_compact.UpdateMapPointersInRoots();
2539 
2540     PagedSpaces spaces;
2541     for (PagedSpace* space = spaces.next();
2542          space != NULL; space = spaces.next()) {
2543       if (space == heap()->map_space()) continue;
2544       map_compact.UpdateMapPointersInPagedSpace(space);
2545     }
2546     map_compact.UpdateMapPointersInNewSpace();
2547     map_compact.UpdateMapPointersInLargeObjectSpace();
2548 
2549     map_compact.Finish();
2550   }
2551 }
2552 
2553 
2554 // Iterate the live objects in a range of addresses (eg, a page or a
2555 // semispace).  Free regions within the range are encoded in place (see
2556 // EncodeFreeRegion above) and are skipped during the iteration.  The size
2557 // function is used to get the size of each live object, and the total size
2558 // of live objects in the range is returned.
2559 int MarkCompactCollector::IterateLiveObjectsInRange(
2560     Address start,
2561     Address end,
2562     LiveObjectCallback size_func) {
2563   int live_objects_size = 0;
2564   Address current = start;
2565   while (current < end) {
2566     uint32_t encoded_map = Memory::uint32_at(current);
2567     if (encoded_map == kSingleFreeEncoding) {
2568       current += kPointerSize;
2569     } else if (encoded_map == kMultiFreeEncoding) {
2570       current += Memory::int_at(current + kIntSize);
2571     } else {
2572       int size = (this->*size_func)(HeapObject::FromAddress(current));
2573       current += size;
2574       live_objects_size += size;
2575     }
2576   }
2577   return live_objects_size;
2578 }
2579 
2580 
2581 int MarkCompactCollector::IterateLiveObjects(
2582     NewSpace* space, LiveObjectCallback size_f) {
2583   ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2584   return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2585 }
2586 
2587 
2588 int MarkCompactCollector::IterateLiveObjects(
2589     PagedSpace* space, LiveObjectCallback size_f) {
2590   ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2591   int total = 0;
2592   PageIterator it(space, PageIterator::PAGES_IN_USE);
2593   while (it.has_next()) {
2594     Page* p = it.next();
2595     total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2596                                        p->AllocationTop(),
2597                                        size_f);
2598   }
2599   return total;
2600 }
2601 
2602 
2603 // -------------------------------------------------------------------------
2604 // Phase 3: Update pointers
2605 
2606 // Helper class for updating pointers in HeapObjects.
2607 class UpdatingVisitor: public ObjectVisitor {
2608  public:
2609   explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
2610 
2611   void VisitPointer(Object** p) {
2612     UpdatePointer(p);
2613   }
2614 
2615   void VisitPointers(Object** start, Object** end) {
2616     // Update all HeapObject pointers in [start, end).
2617     for (Object** p = start; p < end; p++) UpdatePointer(p);
2618   }
2619 
2620   void VisitCodeTarget(RelocInfo* rinfo) {
2621     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2622     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2623     VisitPointer(&target);
2624     rinfo->set_target_address(
2625         reinterpret_cast<Code*>(target)->instruction_start());
2626   }
2627 
2628   void VisitDebugTarget(RelocInfo* rinfo) {
2629     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2630             rinfo->IsPatchedReturnSequence()) ||
2631            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2632             rinfo->IsPatchedDebugBreakSlotSequence()));
2633     Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2634     VisitPointer(&target);
2635     rinfo->set_call_address(
2636         reinterpret_cast<Code*>(target)->instruction_start());
2637   }
2638 
2639   inline Heap* heap() const { return heap_; }
2640 
2641  private:
2642   void UpdatePointer(Object** p) {
2643     if (!(*p)->IsHeapObject()) return;
2644 
2645     HeapObject* obj = HeapObject::cast(*p);
2646     Address old_addr = obj->address();
2647     Address new_addr;
2648     ASSERT(!heap()->InFromSpace(obj));
2649 
2650     if (heap()->new_space()->Contains(obj)) {
2651       Address forwarding_pointer_addr =
2652           heap()->new_space()->FromSpaceLow() +
2653           heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
2654       new_addr = Memory::Address_at(forwarding_pointer_addr);
2655 
2656 #ifdef DEBUG
2657       ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
2658              heap()->old_data_space()->Contains(new_addr) ||
2659              heap()->new_space()->FromSpaceContains(new_addr) ||
2660              heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
2661 
2662       if (heap()->new_space()->FromSpaceContains(new_addr)) {
2663         ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
2664                heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
2665       }
2666 #endif
2667 
2668     } else if (heap()->lo_space()->Contains(obj)) {
2669       // Don't move objects in the large object space.
2670       return;
2671 
2672     } else {
2673 #ifdef DEBUG
2674       PagedSpaces spaces;
2675       PagedSpace* original_space = spaces.next();
2676       while (original_space != NULL) {
2677         if (original_space->Contains(obj)) break;
2678         original_space = spaces.next();
2679       }
2680       ASSERT(original_space != NULL);
2681 #endif
2682       new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
2683       ASSERT(original_space->Contains(new_addr));
2684       ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
2685              original_space->MCSpaceOffsetForAddress(old_addr));
2686     }
2687 
2688     *p = HeapObject::FromAddress(new_addr);
2689 
2690 #ifdef DEBUG
2691     if (FLAG_gc_verbose) {
2692       PrintF("update %p : %p -> %p\n",
2693              reinterpret_cast<Address>(p), old_addr, new_addr);
2694     }
2695 #endif
2696   }
2697 
2698   Heap* heap_;
2699 };
2700 
2701 
2702 void MarkCompactCollector::UpdatePointers() {
2703 #ifdef DEBUG
2704   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2705   state_ = UPDATE_POINTERS;
2706 #endif
2707   UpdatingVisitor updating_visitor(heap());
2708   heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
2709       &updating_visitor);
2710   heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
2711   heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
2712 
2713   // Update the pointer to the head of the weak list of global contexts.
2714   updating_visitor.VisitPointer(&heap()->global_contexts_list_);
2715 
2716   LiveObjectList::IterateElements(&updating_visitor);
2717 
2718   int live_maps_size = IterateLiveObjects(
2719       heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
2720   int live_pointer_olds_size = IterateLiveObjects(
2721       heap()->old_pointer_space(),
2722       &MarkCompactCollector::UpdatePointersInOldObject);
2723   int live_data_olds_size = IterateLiveObjects(
2724       heap()->old_data_space(),
2725       &MarkCompactCollector::UpdatePointersInOldObject);
2726   int live_codes_size = IterateLiveObjects(
2727       heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
2728   int live_cells_size = IterateLiveObjects(
2729       heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
2730   int live_news_size = IterateLiveObjects(
2731       heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
2732 
2733   // Large objects do not move, the map word can be updated directly.
2734   LargeObjectIterator it(heap()->lo_space());
2735   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
2736     UpdatePointersInNewObject(obj);
2737   }
2738 
2739   USE(live_maps_size);
2740   USE(live_pointer_olds_size);
2741   USE(live_data_olds_size);
2742   USE(live_codes_size);
2743   USE(live_cells_size);
2744   USE(live_news_size);
2745   ASSERT(live_maps_size == live_map_objects_size_);
2746   ASSERT(live_data_olds_size == live_old_data_objects_size_);
2747   ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2748   ASSERT(live_codes_size == live_code_objects_size_);
2749   ASSERT(live_cells_size == live_cell_objects_size_);
2750   ASSERT(live_news_size == live_young_objects_size_);
2751 }
2752 
2753 
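// Despite its name, this updater is also used for large objects (see the loop
// in UpdatePointers above): new-space and large objects keep plain map
// pointers, whereas objects in the compacted paged spaces have their map word
// temporarily encoded with forwarding information.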
int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(heap()->map_space()->Contains(old_map));
  ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point the first word at map_addr is itself still encoded, so we
  // cannot cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


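// During the encoding phase the map word of each live object in a compacted
// paged space was overwritten with the address of its map and the offset of
// the object's new location relative to the forwarding address of the first
// live object on its page (Page::mc_first_forwarded).  This function turns
// that offset back into an absolute address, spilling over into the next page
// when the offset runs past the allocation watermark of the first page.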
Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page and page offset of the forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page.  NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects.  Map objects are always relocated first; relocating
  // objects in the other spaces relies on map objects to compute object
  // sizes.
  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip the from and to spaces.
  heap()->new_space()->Flip();

  heap()->new_space()->MCCommitRelocationInfo();

  // Set the age mark to the bottom of to-space.
  Address mark = heap()->new_space()->bottom();
  heap()->new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  heap()->CheckNewSpaceExpansionCriteria();
  heap()->IncrementYoungSurvivorsCounter(live_news_size);
}


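// Map objects all have the same fixed size (Map::kSize), so no per-object
// size computation is needed here; this is what allows the map space to be
// compacted before the other spaces, whose object sizes depend on maps.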
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer.  The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


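// Old-data space holds no pointers into the heap, so its objects can be moved
// with a plain MoveBlock; pointer-containing spaces use the variant that also
// updates the page region marks so the old-to-new remembered set stays
// accurate.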
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == heap()->old_data_space()) {
      heap()->MoveBlock(new_addr, old_addr, obj_size);
    } else {
      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                      old_addr,
                                                      obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


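// Thin wrappers that bind RelocateOldNonCodeObject to a particular space so
// they can be passed to IterateLiveObjects, which expects callbacks taking
// only a HeapObject*.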
int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->cell_space());
}


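// Code objects get their own path: after the raw bytes are moved, the code
// object must be relocated so that addresses and inline cache targets
// embedded in the instruction stream are adjusted by the delta between the
// old and new addresses.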
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


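// Forwarding addresses for new-space objects are not kept in their map words;
// during the encoding phase they were written into from-space at the offset
// corresponding to each object, which is why the address is read back from
// FromSpaceLow() + offset here.  Survivors either stay in new space or have
// been promoted into one of the old spaces.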
int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (heap()->new_space()->FromSpaceContains(new_addr)) {
    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
           heap()->TargetSpace(obj) == heap()->old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
    heap()->CopyBlock(new_addr, old_addr, obj_size);
  } else {
    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(heap()->isolate());
  } else {
    if (code_flusher_ == NULL) return;
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
#endif
}


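// The map word of a marked object still carries the mark bit, so it has to be
// cleared before the word can be turned back into a Map* and used to compute
// the object's size.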
int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} }  // namespace v8::internal