// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include "src/cancelable-task.h"
#include "src/execution.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
#include "src/heap/spaces.h"
#include "src/objects.h"

namespace v8 {
namespace internal {

// Forward declarations.
class MarkBit;
class PagedSpace;

enum class StepOrigin { kV8, kTask };

class V8_EXPORT_PRIVATE IncrementalMarking {
 public:
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  State state() {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() { return state() == STOPPED; }

  inline bool IsSweeping() { return state() == SWEEPING; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  inline bool NeedsFinalization() {
    return IsMarking() &&
           (request_type_ == FINALIZATION ||
            request_type_ == COMPLETE_MARKING);
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = NONE; }

  bool CanBeActivated();

  bool WasActivated();

  void Start(GarbageCollectionReason gc_reason);

  void FinalizeIncrementally();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps until deadline_in_ms is reached. It
  // returns the remaining time that cannot be used for incremental marking
  // anymore because a single step would exceed the deadline.
  double AdvanceIncrementalMarking(double deadline_in_ms,
                                   CompletionAction completion_action,
                                   ForceCompletionAction force_completion,
                                   StepOrigin step_origin);
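
  // Illustrative sketch (not part of this header): a deadline-driven caller,
  // such as an idle-time task, might drive the method above roughly as
  // follows, assuming it holds a Heap* named |heap| and a millisecond budget
  // |idle_budget_ms|:
  //
  //   double deadline_in_ms =
  //       heap->MonotonicallyIncreasingTimeInMs() + idle_budget_ms;
  //   double remaining_time_in_ms =
  //       heap->incremental_marking()->AdvanceIncrementalMarking(
  //           deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
  //           IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kTask);
  //
  // The exact completion-action and force-completion choices depend on the
  // caller; see the real call sites in the heap for authoritative usage.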

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const size_t kAllocatedThreshold = 64 * KB;

  static const int kStepSizeInMs = 1;
  static const int kMaxStepSizeInMs = 5;

  // This is the upper bound for how many times we allow finalization of
  // incremental marking to be postponed.
  static const int kMaxIdleMarkingDelayCounter = 3;

#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  static const intptr_t kActivationThreshold = 0;
#endif

  void FinalizeSweeping();

  size_t Step(size_t bytes_to_process, CompletionAction action,
              ForceCompletionAction completion, StepOrigin step_origin);

  inline void RestartIfNotMarking();

  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                  Isolate* isolate);

  static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
                                             Isolate* isolate);

  // Record a slot for compaction. Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                     Code* value));

  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  void WhiteToGreyAndPush(HeapObject* obj);

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(Page* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

  void ClearIdleMarkingDelayCounter();

  bool IsIdleMarkingDelayCounterLimitReached();

  static void MarkGrey(Heap* heap, HeapObject* object);

  static void MarkBlack(HeapObject* object, int size);

  static void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
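
  // Note on the color encoding assumed by TransferColor() below (inferred
  // from its mark-bit manipulation; mark-compact.h holds the authoritative
  // definitions): each object owns a pair of consecutive mark bits, where
  // white = 00, grey = 10 (first bit set), and black = 11 (both bits set).
  // Only black objects are accounted in a page's live byte count, which is
  // why the transfer requires a live bytes update exactly when |from| is
  // black.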

  // Returns true if the color transfer requires live bytes updating.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
                                   int size)) {
    MarkBit from_mark_bit = ObjectMarking::MarkBitFrom(from);
    MarkBit to_mark_bit = ObjectMarking::MarkBitFrom(to);

    if (Marking::IsBlack(to_mark_bit)) {
      DCHECK(to->GetHeap()->incremental_marking()->black_allocation());
      return false;
    }

    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

  void IterateBlackObject(HeapObject* object);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() { StartBlackAllocation(); }

  void AbortBlackAllocation();

 private:
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override {
      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
    }

   private:
    IncrementalMarking& incremental_marking_;
  };
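
  // Sketch of how the observer is wired up (the registration itself lives in
  // incremental-marking.cc, not in this header): new_generation_observer_ and
  // old_generation_observer_, declared below, are AllocationObservers whose
  // Step() fires after each step_size bytes of allocation in their space and
  // forwards to AdvanceIncrementalMarkingOnAllocation(), pacing the marker
  // against the mutator as described at kAllocatedThreshold above.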

  int64_t SpaceLeftInOldSpace();

  void StartMarking();

  void StartBlackAllocation();
  void FinishBlackAllocation();

  void MarkRoots();
  void MarkObjectGroups();
  void ProcessWeakCells();
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(
      intptr_t bytes_to_process,
      ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  void IncrementIdleMarkingDelayCounter();

  void AdvanceIncrementalMarkingOnAllocation();

  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();

  Heap* heap_;

  State state_;

  double start_time_ms_;
  size_t initial_old_generation_size_;
  size_t old_generation_allocation_counter_;
  size_t bytes_allocated_;
  size_t bytes_marked_ahead_of_schedule_;
  size_t unscanned_bytes_of_large_object_;

  int idle_marking_delay_counter_;
  int incremental_marking_finalization_rounds_;

  bool is_compacting_;
  bool should_hurry_;
  bool was_activated_;
  bool black_allocation_;
  bool finalize_marking_completed_;
  bool trace_wrappers_toggle_;

  GCRequestType request_type_;

  IncrementalMarkingJob incremental_marking_job_;
  Observer new_generation_observer_;
  Observer old_generation_observer_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_