// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include "src/cancelable-task.h"
#include "src/execution.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/spaces.h"
#include "src/objects.h"

namespace v8 {
namespace internal {

// Forward declarations.
class MarkBit;
class PagedSpace;

class IncrementalMarking {
 public:
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };

  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

  struct StepActions {
    StepActions(CompletionAction complete_action_,
                ForceMarkingAction force_marking_,
                ForceCompletionAction force_completion_)
        : completion_action(complete_action_),
          force_marking(force_marking_),
          force_completion(force_completion_) {}

    CompletionAction completion_action;
    ForceMarkingAction force_marking;
    ForceCompletionAction force_completion;
  };

  static StepActions IdleStepActions();

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  State state() {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() { return state() == STOPPED; }

  inline bool IsSweeping() { return state() == SWEEPING; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = NONE; }

  bool CanBeActivated();

  bool ShouldActivateEvenWithoutIdleNotification();

  bool WasActivated();

  void Start(const char* reason = nullptr);

  void FinalizeIncrementally();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps until deadline_in_ms is reached. It
  // returns the remaining time that cannot be used for incremental marking
  // anymore because a single step would exceed the deadline.
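  // For example (illustrative only): with an 8 ms deadline, if 7 ms have
  // already been spent on steps and the next step is estimated at 2 ms, the
  // loop stops and reports back the ~1 ms it could not use.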
  double AdvanceIncrementalMarking(double deadline_in_ms,
                                   StepActions step_actions);

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it.  We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;
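  // Rough illustration of how these constants interact (a sketch, not
  // necessarily the exact formula used by Step): if the per-step budget is
  // roughly
  //   bytes_to_process = marking_speed_ * max(bytes allocated,
  //                                           write barriers invoked),
  // then at kInitialMarkingSpeed a step triggered by kAllocatedThreshold
  // (64 KB) of allocation marks about 64 KB of objects, while at
  // kMaxMarkingSpeed the same trigger marks up to a thousand times as much.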

  // This is the upper bound for how many times we allow finalization of
  // incremental marking to be postponed.
  static const size_t kMaxIdleMarkingDelayCounter = 3;

  void FinalizeSweeping();

  void OldSpaceStep(intptr_t allocated);

  intptr_t Step(intptr_t allocated, CompletionAction action,
                ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
                ForceCompletionAction completion = FORCE_COMPLETION);

  inline void RestartIfNotMarking() {
    if (state_ == COMPLETE) {
      state_ = MARKING;
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
      }
    }
  }

  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                  Isolate* isolate);

  static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
                                             Isolate* isolate);

  // Record a slot for compaction.  Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
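  // (Illustration: a typed slot recorded in an object that stays white and is
  // then swept would later be read against memory that no longer holds that
  // object, so its type could not be decoded.)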
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                     Code* value));


  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(Page* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate();

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

  void ClearIdleMarkingDelayCounter();

  bool IsIdleMarkingDelayCounterLimitReached();

  static void MarkObject(Heap* heap, HeapObject* object);

  void IterateBlackObject(HeapObject* object);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() { StartBlackAllocation(); }

 private:
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override {
      incremental_marking_.Step(bytes_allocated,
                                IncrementalMarking::GC_VIA_STACK_GUARD);
    }

   private:
    IncrementalMarking& incremental_marking_;
  };

  int64_t SpaceLeftInOldSpace();

  void SpeedUp();

  void ResetStepCounters();

  void StartMarking();

  void StartBlackAllocation();
  void FinishBlackAllocation();

  void MarkRoots();
  void MarkObjectGroups();
  void ProcessWeakCells();
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  void IncrementIdleMarkingDelayCounter();

  Heap* heap_;

  Observer observer_;

  State state_;
  bool is_compacting_;

  int steps_count_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;
  size_t idle_marking_delay_counter_;

  int unscanned_bytes_of_large_object_;

  bool was_activated_;

  bool black_allocation_;

  bool finalize_marking_completed_;

  int incremental_marking_finalization_rounds_;

  GCRequestType request_type_;

  IncrementalMarkingJob incremental_marking_job_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_