// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/objects-visiting.h"
#include "src/objects-visiting-inl.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}

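// Slow path of the incremental write barrier: if the host object is already
// black it will not be rescanned, so the slot has to be recorded explicitly.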
void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned; we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}

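// Called from generated code. In addition to running the write barrier, this
// keeps write_barriers_invoked_since_last_step_ roughly up to date via the
// per-chunk write barrier counter, which Step() uses to size its work budget.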
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object** slot,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
            chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}

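// Write barrier for pointers embedded in code objects. A still-white target
// causes the host code object to be pushed back to grey so that the new
// pointer is scanned; otherwise the reloc slot is recorded when compacting.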
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

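// Marks an object grey without pushing it onto the marking deque. If the
// object was already black, its live bytes are reverted so they are not
// counted twice when it is blackened again in the final pass.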
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

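// Used for data-only objects, which never need to be pushed onto the marking
// deque: white objects are marked black immediately, grey and black objects
// are left unchanged.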
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit,
                                       int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}

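// Marks a white or grey object black and accounts for its live bytes;
// already-black objects are left untouched.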
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit,
                                        int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}

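// Marking visitor used while incremental marking is in progress. It extends
// the static marking visitor with incremental-specific handling of fixed
// arrays (chunked scanning with a progress bar) and native contexts.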
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until it
      // is fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
                             chunk->progress_bar());
      int end_offset = Min(object_size,
                           start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap,
                                HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap,
                                             Object** anchor,
                                             Object** start,
                                             Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if the object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};

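// Visitor that marks the strong roots grey when incremental marking starts.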
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}

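// Sets or clears the page flags that drive the incremental write barrier for
// old-space and large-object pages.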
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}

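// The helpers below flip the write barrier page flags for whole spaces when
// incremental marking is activated or deactivated.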
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking &&
      FLAG_incremental_marking_steps &&
      heap_->gc_state() == Heap::NOT_IN_GC &&
      !heap_->isolate()->serializer_enabled() &&
      heap_->isolate()->IsInitialized() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}

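// Switches every RecordWrite stub in the code stub cache to the given mode.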
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

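// Lazily allocates and commits the virtual memory that backs the marking
// deque.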
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}

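// Enters incremental marking: starts marking right away, or waits in the
// SWEEPING state until concurrent sweeping has finished.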
void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(FLAG_incremental_marking_steps);
  ASSERT(state_ == STOPPED);
  ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
  ASSERT(!heap_->isolate()->serializer_enabled());
  ASSERT(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}

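// Transitions to the MARKING state: decides whether to compact, patches the
// RecordWrite stubs, sets up the marking deque, activates the incremental
// write barrier and marks the strong roots grey.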
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

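// Clears the marking bitmaps of the new-space pages that currently form
// from-space before a scavenge.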
void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

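// Rebuilds the marking deque after a scavenge: new-space entries are replaced
// by the forwarding addresses of survivors (dead objects are dropped), and
// one-word fillers are skipped.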
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}

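// Visits the body of a single object from the deque, marking its map grey if
// necessary, and then transitions the object itself to black.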
void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_ASSERTS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}

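// Drains the marking deque until roughly bytes_to_process bytes of object
// bodies have been scanned; large objects scanned via the progress bar only
// account for the portion actually visited.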
void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_to_process -= (size - unscanned_bytes_of_large_object_);
  }
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}

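// Finishes marking as quickly as possible by draining the marking deque
// without a byte budget, then blackens the caches that were deliberately kept
// grey during marking.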
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

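// Abandons incremental marking: reverts the stub patches, write barrier and
// page flags without draining the marking deque.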
void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental;
  // instead we set the should-hurry flag to indicate that there can't be
  // much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

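// Called on old-space allocation: starts incremental marking if it is
// worthwhile, otherwise performs a marking step proportional to the number of
// allocated bytes.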
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

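// Performs one incremental marking step. The work budget is derived from the
// bytes allocated and the write barriers invoked since the last step, and the
// marking speed is increased when the marker appears to be falling behind.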
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  // The marking speed is driven either by the allocation rate or by the rate
  // at which we are having to check the color of objects in the write
  // barrier. It is possible for a tight non-allocating loop to run a lot of
  // write barriers before we get here and check them (marking can only take
  // place on allocation), so to reduce the lumpiness we don't use the write
  // barriers invoked since the last step directly to determine the amount of
  // work to do.
  intptr_t bytes_to_process =
      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
  allocated_ = 0;
  write_barriers_invoked_since_last_step_ = 0;

  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
        heap_->mark_compact_collector()->IsSweepingCompleted()) {
      heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
    }
    if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    ProcessMarkingDeque(bytes_to_process);
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed,
              static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
    heap_->AddMarkingTime(delta);
  }
}

void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal