// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include "src/cancelable-task.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"

namespace v8 {
namespace internal {

class HeapObject;
class MarkBit;
class Map;
class Object;
class PagedSpace;

enum class StepOrigin { kV8, kTask };
enum class WorklistToProcess { kAll, kBailout };

class V8_EXPORT_PRIVATE IncrementalMarking {
 public:
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

#ifdef V8_CONCURRENT_MARKING
  using MarkingState = IncrementalMarkingState;
#else
  using MarkingState = MajorNonAtomicMarkingState;
#endif  // V8_CONCURRENT_MARKING
  using AtomicMarkingState = MajorAtomicMarkingState;
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;
  class PauseBlackAllocationScope {
   public:
    explicit PauseBlackAllocationScope(IncrementalMarking* marking)
        : marking_(marking), paused_(false) {
      if (marking_->black_allocation()) {
        paused_ = true;
        marking_->PauseBlackAllocation();
      }
    }

    ~PauseBlackAllocationScope() {
      if (paused_) {
        marking_->StartBlackAllocation();
      }
    }

   private:
    IncrementalMarking* marking_;
    bool paused_;
  };
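
  // A minimal usage sketch (the surrounding call site is an assumption for
  // illustration, not actual V8 code):
  //
  //   {
  //     IncrementalMarking::PauseBlackAllocationScope pause(
  //         heap->incremental_marking());
  //     // Black allocation is paused here if it was active.
  //   }
  //   // On scope exit, black allocation restarts if it was paused.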

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated, or this
  // many heavy (color-checking) write barriers have been invoked.
  static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
  static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
  static const size_t kMinStepSizeInBytes = 64 * KB;

  static const int kStepSizeInMs = 1;
  static const int kMaxStepSizeInMs = 5;
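
  // For intuition (an illustrative reading of the constants above; the exact
  // pacing lives in StepSizeToMakeProgress() and
  // StepSizeToKeepUpWithAllocations() below): after roughly
  // kOldGenerationAllocatedThreshold = 256 KB of old-generation allocation, a
  // step processes at least kMinStepSizeInBytes = 64 KB of marking work,
  // aiming for about kStepSizeInMs = 1 ms and bounded by kMaxStepSizeInMs =
  // 5 ms.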

#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  static const intptr_t kActivationThreshold = 0;
#endif

#ifdef V8_CONCURRENT_MARKING
  static const AccessMode kAtomicity = AccessMode::ATOMIC;
#else
  static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
#endif

  IncrementalMarking(Heap* heap,
                     MarkCompactCollector::MarkingWorklist* marking_worklist,
                     WeakObjects* weak_objects);

  MarkingState* marking_state() { return &marking_state_; }

  AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void NotifyLeftTrimming(HeapObject* from, HeapObject* to);

  // Transfers the marking color of |from| to |to|: a grey or black |from|
  // turns a white |to| grey or black respectively. A |to| that is already
  // black is only expected under black allocation.
  V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
    if (atomic_marking_state()->IsBlack(to)) {
      DCHECK(black_allocation());
      return;
    }

    DCHECK(atomic_marking_state()->IsWhite(to));
    if (atomic_marking_state()->IsGrey(from)) {
      bool success = atomic_marking_state()->WhiteToGrey(to);
      DCHECK(success);
      USE(success);
    } else if (atomic_marking_state()->IsBlack(from)) {
      bool success = atomic_marking_state()->WhiteToBlack(to);
      DCHECK(success);
      USE(success);
    }
  }

  State state() const {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() const { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() const { return state() == STOPPED; }

  inline bool IsSweeping() const { return state() == SWEEPING; }

  inline bool IsMarking() const { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() const { return state() == MARKING; }

  inline bool IsComplete() const { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  inline bool NeedsFinalization() {
    return IsMarking() &&
           (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = NONE; }

  bool CanBeActivated();

  bool WasActivated();

  void Start(GarbageCollectionReason gc_reason);

  void FinalizeIncrementally();

  void UpdateMarkingWorklistAfterScavenge();
  void UpdateWeakReferencesAfterScavenge();
  void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps until deadline_in_ms is reached. It
  // returns the remaining time that cannot be used for incremental marking
  // anymore because a single step would exceed the deadline.
  double AdvanceIncrementalMarking(double deadline_in_ms,
                                   CompletionAction completion_action,
                                   StepOrigin step_origin);
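
  // Usage sketch (the caller and its deadline variable are assumptions for
  // illustration): an idle-time or task-based caller can drive marking with
  //
  //   double remaining_ms = marking->AdvanceIncrementalMarking(
  //       deadline_in_ms, IncrementalMarking::GC_VIA_STACK_GUARD,
  //       StepOrigin::kTask);
  //
  // where remaining_ms is budget that one further step would overrun.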

  void FinalizeSweeping();

  size_t Step(size_t bytes_to_process, CompletionAction action,
              StepOrigin step_origin,
              WorklistToProcess worklist_to_process = WorklistToProcess::kAll);

  inline void RestartIfNotMarking();

  static int RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
                                 Isolate* isolate);

  // Record a slot for compaction.  Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
  V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
  V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
                                      MaybeObject* value);
  void RevisitObject(HeapObject* obj);

  void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
                       Object* value);
  void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, HeapObject* value);
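
  // Illustrative write-barrier sketch (the field setter and call site are
  // assumptions, not actual V8 code): after a store such as
  //
  //   host->set_foo(value);
  //
  // the barrier calls
  //
  //   incremental_marking->RecordWrite(host, &slot, value);
  //
  // so the slot is recorded for compaction and |value| is greyed if needed.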

  // Returns true if the function succeeds in transitioning the object
  // from white to grey.
  bool WhiteToGreyAndPush(HeapObject* obj);

  // This function is used to color the object black before it undergoes an
  // unsafe layout change. This is part of the synchronization protocol with
  // the concurrent marker.
  void MarkBlackAndPush(HeapObject* obj);
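
  // Protocol sketch (the mutator-side call site is an assumption for
  // illustration): before an unsafe layout change, the mutator does
  //
  //   incremental_marking->MarkBlackAndPush(object);
  //   MutateLayoutOf(object);  // hypothetical unsafe layout change
  //
  // so the concurrent marker, which skips black objects, never scans
  // |object| mid-change; the object is revisited on the main thread instead.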

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

  void ProcessBlackAllocatedObject(HeapObject* obj);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() {
    if (!black_allocation_) {
      StartBlackAllocation();
    }
  }

  void AbortBlackAllocation();

  MarkCompactCollector::MarkingWorklist* marking_worklist() const {
    return marking_worklist_;
  }

  void Deactivate();

 private:
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override;

   private:
    IncrementalMarking& incremental_marking_;
  };

  void StartMarking();

  void StartBlackAllocation();
  void PauseBlackAllocation();
  void FinishBlackAllocation();

  void MarkRoots();
  bool ShouldRetainMap(Map* map, int age);
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  template <WorklistToProcess worklist_to_process = WorklistToProcess::kAll>
  V8_INLINE intptr_t ProcessMarkingWorklist(
      intptr_t bytes_to_process,
      ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);

  V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject* object);

  // Visits the object and returns its size.
  V8_INLINE int VisitObject(Map* map, HeapObject* obj);

  void IncrementIdleMarkingDelayCounter();

  void AdvanceIncrementalMarkingOnAllocation();

  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();

  void SetState(State s) {
    state_ = s;
    heap_->SetIsMarkingFlag(s >= MARKING);
  }

  Heap* const heap_;
  MarkCompactCollector::MarkingWorklist* const marking_worklist_;
  WeakObjects* weak_objects_;

  double start_time_ms_;
  size_t initial_old_generation_size_;
  size_t old_generation_allocation_counter_;
  size_t bytes_allocated_;
  size_t bytes_marked_ahead_of_schedule_;
  // A sample of concurrent_marking()->TotalMarkedBytes() at the last
  // incremental marking step. It is used for updating
  // bytes_marked_ahead_of_schedule_ with the contribution of concurrent
  // marking.
  size_t bytes_marked_concurrently_;
  size_t unscanned_bytes_of_large_object_;

  // Must use SetState() above to update state_.
  State state_;

  bool is_compacting_;
  bool should_hurry_;
  bool was_activated_;
  bool black_allocation_;
  bool finalize_marking_completed_;
  bool trace_wrappers_toggle_;
  IncrementalMarkingJob incremental_marking_job_;

  GCRequestType request_type_;

  Observer new_generation_observer_;
  Observer old_generation_observer_;

  MarkingState marking_state_;
  AtomicMarkingState atomic_marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_