// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include <atomic>
#include <unordered_map>

#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
#include "src/tasks/cancelable-task.h"

namespace v8 {
namespace internal {

class HeapObject;
class MarkBit;
class Map;
class Object;
class PagedSpace;

enum class StepOrigin { kV8, kTask };
enum class StepResult {
  kNoImmediateWork,
  kMoreWorkRemaining,
  kWaitingForFinalization
};

class V8_EXPORT_PRIVATE IncrementalMarking final {
 public:
  enum State : uint8_t { STOPPED, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum class GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

  using MarkingState = MarkCompactCollector::MarkingState;
  using AtomicMarkingState = MarkCompactCollector::AtomicMarkingState;
  using NonAtomicMarkingState = MarkCompactCollector::NonAtomicMarkingState;

  class V8_NODISCARD PauseBlackAllocationScope {
   public:
    explicit PauseBlackAllocationScope(IncrementalMarking* marking)
        : marking_(marking), paused_(false) {
      if (marking_->black_allocation()) {
        paused_ = true;
        marking_->PauseBlackAllocation();
      }
    }

    ~PauseBlackAllocationScope() {
      if (paused_) {
        marking_->StartBlackAllocation();
      }
    }

   private:
    IncrementalMarking* marking_;
    bool paused_;
  };
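
  // A minimal usage sketch (illustration only, not part of this header):
  // black allocation treats new old-space allocations as already marked, so a
  // caller that needs freshly allocated objects to start out unmarked for the
  // duration of an operation can wrap that operation in the RAII scope above.
  // The function name below is hypothetical.
  //
  //   void DoWorkWithWhiteAllocations(IncrementalMarking* marking) {
  //     IncrementalMarking::PauseBlackAllocationScope pause(marking);
  //     // ... allocate and initialize objects; if black allocation was
  //     // active, it is resumed automatically when `pause` is destroyed.
  //   }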

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
  static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
  static const size_t kMinStepSizeInBytes = 64 * KB;

  static constexpr double kStepSizeInMs = 1;
  static constexpr double kMaxStepSizeInMs = 5;

#ifndef DEBUG
  static constexpr size_t kV8ActivationThreshold = 8 * MB;
  static constexpr size_t kEmbedderActivationThreshold = 8 * MB;
#else
  static constexpr size_t kV8ActivationThreshold = 0;
  static constexpr size_t kEmbedderActivationThreshold = 0;
#endif

  static const AccessMode kAtomicity = AccessMode::ATOMIC;

  IncrementalMarking(Heap* heap, WeakObjects* weak_objects);

  MarkingState* marking_state() { return &marking_state_; }

  AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void NotifyLeftTrimming(HeapObject from, HeapObject to);

  V8_INLINE void TransferColor(HeapObject from, HeapObject to);

  State state() const {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() const { return state() == STOPPED; }

  inline bool IsMarking() const { return state() >= MARKING; }

  inline bool IsComplete() const { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == GCRequestType::FINALIZATION &&
           !finalize_marking_completed_;
  }

  inline bool NeedsFinalization() {
    return IsMarking() && (request_type_ == GCRequestType::FINALIZATION ||
                           request_type_ == GCRequestType::COMPLETE_MARKING);
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = GCRequestType::NONE; }

  bool CanBeActivated();

  bool WasActivated();

  void Start(GarbageCollectionReason gc_reason);
  // Returns true if incremental marking was running and false otherwise.
  bool Stop();

  void FinalizeIncrementally();

  void UpdateMarkingWorklistAfterYoungGenGC();
  void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps and returns before deadline_in_ms is
  // reached. It may return earlier if the marker is already ahead of the
  // marking schedule, which is indicated with StepResult::kNoImmediateWork.
  StepResult AdvanceWithDeadline(double deadline_in_ms,
                                 CompletionAction completion_action,
                                 StepOrigin step_origin);
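
  // A minimal sketch (illustration only, not part of this header) of how a
  // marking task might drive the marker: take a small step bounded by a
  // deadline and let finalization be requested through the stack guard. The
  // wrapper function and the deadline computation are assumptions.
  //
  //   void RunMarkingStep(IncrementalMarking* marking, double now_ms) {
  //     double deadline_ms = now_ms + IncrementalMarking::kStepSizeInMs;
  //     StepResult result = marking->AdvanceWithDeadline(
  //         deadline_ms, IncrementalMarking::GC_VIA_STACK_GUARD,
  //         StepOrigin::kTask);
  //     if (result == StepResult::kMoreWorkRemaining) {
  //       // Schedule another step; the pacing constants above determine how
  //       // aggressively that should happen.
  //     }
  //   }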

  void FinalizeSweeping();
  bool ContinueConcurrentSweeping();
  void SupportConcurrentSweeping();

  StepResult Step(double max_step_size_in_ms, CompletionAction action,
                  StepOrigin step_origin);

  bool ShouldDoEmbedderStep();
  StepResult EmbedderStep(double expected_duration_ms, double* duration_ms);

  V8_INLINE void RestartIfNotMarking();

  // Returns true if the function succeeds in transitioning the object
  // from white to grey.
  V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);

  // This function is used to color the object black before it undergoes an
  // unsafe layout change. This is part of the synchronization protocol with
  // the concurrent marker.
  void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);

  void MarkBlackBackground(HeapObject obj, int object_size);

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ProcessBlackAllocatedObject(HeapObject obj);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() {
    if (!black_allocation_) {
      StartBlackAllocation();
    }
  }

  MarkingWorklists::Local* local_marking_worklists() const {
    return collector_->local_marking_worklists();
  }

  void Deactivate();

  // Ensures that the given region is black allocated if it is in the old
  // generation.
  void EnsureBlackAllocated(Address allocated, size_t size);

  bool IsBelowActivationThresholds() const;

  void IncrementLiveBytesBackground(MemoryChunk* chunk, intptr_t by) {
    base::MutexGuard guard(&background_live_bytes_mutex_);
    background_live_bytes_[chunk] += by;
  }

  void MarkRootsForTesting();

 private:
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking* incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override;

   private:
    IncrementalMarking* incremental_marking_;
  };

  void StartMarking();

  void StartBlackAllocation();
  void PauseBlackAllocation();
  void FinishBlackAllocation();

  bool ShouldRetainMap(Map map, int age);
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void PublishWriteBarrierWorklists();

  // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
  // time.
  void ScheduleBytesToMarkBasedOnTime(double time_ms);
  // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
  // allocations.
  void ScheduleBytesToMarkBasedOnAllocation();
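
  // Rough shape of the pacing logic (a sketch inferred from the comments in
  // this header, not the actual implementation): the scheduling calls above
  // grow scheduled_bytes_to_mark_, and a step then tries to close the gap
  // between what has been scheduled and what has already been marked, while
  // never shrinking below kMinStepSizeInBytes.
  //
  //   size_t remaining = scheduled_bytes_to_mark_ > bytes_marked_
  //                          ? scheduled_bytes_to_mark_ - bytes_marked_
  //                          : 0;
  //   size_t step_size_in_bytes = std::max(remaining, kMinStepSizeInBytes);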

  // Helper functions for ScheduleBytesToMarkBasedOnAllocation.
  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();
  void AddScheduledBytesToMark(size_t bytes_to_mark);

  // Schedules more bytes to mark so that the marker is no longer ahead
  // of schedule.
  void FastForwardSchedule();
  void FastForwardScheduleIfCloseToFinalization();

  // Fetches marked byte counters from the concurrent marker.
  void FetchBytesMarkedConcurrently();

  // Returns the bytes to mark in the current step based on the scheduled
  // bytes and already marked bytes.
  size_t ComputeStepSizeInBytes(StepOrigin step_origin);

  void AdvanceOnAllocation();

  void SetState(State s) {
    state_ = s;
    heap_->SetIsMarkingFlag(s >= MARKING);
  }

  double CurrentTimeToMarkingTask() const;

  Heap* const heap_;
  MarkCompactCollector* const collector_;
  WeakObjects* weak_objects_;

  double start_time_ms_ = 0.0;
  double time_to_force_completion_ = 0.0;
  size_t initial_old_generation_size_ = 0;
  size_t old_generation_allocation_counter_ = 0;
  size_t bytes_marked_ = 0;
  size_t scheduled_bytes_to_mark_ = 0;
  double schedule_update_time_ms_ = 0.0;
  // A sample of concurrent_marking()->TotalMarkedBytes() at the last
  // incremental marking step. It is used for updating bytes_marked_ with the
  // contribution of concurrent marking.
  size_t bytes_marked_concurrently_ = 0;

  // Must use SetState() above to update state_.
  // Atomic since the main thread can complete marking (= changing state),
  // while a background thread's slow allocation path will check whether
  // incremental marking is currently running.
  std::atomic<State> state_;

  bool is_compacting_ = false;
  bool was_activated_ = false;
  bool black_allocation_ = false;
  bool finalize_marking_completed_ = false;
  IncrementalMarkingJob incremental_marking_job_;

  std::atomic<GCRequestType> request_type_{GCRequestType::NONE};

  Observer new_generation_observer_;
  Observer old_generation_observer_;

  MarkingState marking_state_;
  AtomicMarkingState atomic_marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  base::Mutex background_live_bytes_mutex_;
  std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_