// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-reducer.h"

#include "src/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

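// How the constants below are used by Step():
// - kLongDelayMs: delay before the next GC attempt while in the WAIT state.
// - kShortDelayMs: delay between consecutive GCs within a run.
// - kWatchdogDelayMs: a GC is forced if none has happened for this long,
//   even when the allocation rate is not low (see WatchdogGC).
// - kMaxNumberOfGCs: upper bound on GCs started per run.
// - kCommittedMemoryFactor, kCommittedMemoryDelta: committed old-generation
//   memory must grow past both (relative and absolute) thresholds measured
//   at the last run before a new run may start.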
const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
const double MemoryReducer::kCommittedMemoryFactor = 1.1;
const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;

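// The timer task samples the allocation rate, packages the observations into
// a kTimer event, and forwards it to the memory reducer's state machine.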
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}


void MemoryReducer::TimerTask::RunInternal() {
  Heap* heap = memory_reducer_->heap();
  Event event;
  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
                                   heap->OldGenerationAllocationCounter());
  bool low_allocation_rate = heap->HasLowAllocationRate();
  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
  if (FLAG_trace_gc_verbose) {
    heap->isolate()->PrintWithTimestamp(
        "Memory reducer: %s, %s\n",
        low_allocation_rate ? "low alloc" : "high alloc",
        optimize_for_memory ? "background" : "foreground");
  }
  event.type = kTimer;
  event.time_ms = time_ms;
  // The memory reducer will start incremental marking if
  // 1) the mutator is likely idle: the JS call rate is low and the
  //    allocation rate is low.
  // 2) the mutator is in the background: the optimize-for-memory flag is set.
  event.should_start_incremental_gc =
      low_allocation_rate || optimize_for_memory;
  event.can_start_incremental_gc =
      heap->incremental_marking()->IsStopped() &&
      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
  event.committed_memory = heap->CommittedOldGenerationMemory();
  memory_reducer_->NotifyTimer(event);
}


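// Handles a kTimer event: starts an idle incremental GC when the state
// machine transitions to RUN, or re-schedules the timer while it stays in
// WAIT.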
void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
                                            state_.started_gcs);
    }
    heap()->StartIdleIncrementalMarking(
        GarbageCollectionReason::kMemoryReducer);
  } else if (state_.action == kWait) {
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
      heap()->incremental_marking()->AdvanceIncrementalMarking(
          deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
          IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
      heap()->FinalizeIncrementalMarkingIfComplete(
          GarbageCollectionReason::kFinalizeMarkingViaTask);
    }
    // Re-schedule the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: waiting for %.f ms\n",
          state_.next_gc_start_ms - event.time_ms);
    }
  }
}


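// Handles a kMarkCompact event: starts the wait timer on a transition into
// WAIT and logs the completion of a memory-reducer GC.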
void MemoryReducer::NotifyMarkCompact(const Event& event) {
  DCHECK_EQ(kMarkCompact, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
  }
  if (old_action == kRun) {
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
          state_.action == kWait ? "will do more" : "done");
    }
  }
}

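// Handles a kPossibleGarbage event, which may move the state machine from
// DONE to WAIT and start the timer.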
void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
  DCHECK_EQ(kPossibleGarbage, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
  }
}


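// Returns true if the last GC was so long ago (kWatchdogDelayMs) that a GC
// is justified even though the mutator is not idle.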
bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
  return state.last_gc_time_ms != 0 &&
         event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
}


// For specification of this function see the comment for MemoryReducer class.
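// A sketch of the state transitions implemented below, inferred from this
// file (the authoritative specification is the MemoryReducer class comment):
//
//   DONE --kPossibleGarbage, or kMarkCompact with enough
//          committed-memory growth--> WAIT
//   WAIT --kTimer when idle/backgrounded and the delay has elapsed--> RUN
//   RUN  --kMarkCompact, budget left and likely more to collect--> WAIT
//   RUN  --kMarkCompact otherwise--> DONE
//
// State fields, in constructor order: action, started_gcs, next_gc_start_ms,
// last_gc_time_ms, committed_memory_at_last_run.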
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
    return State(kDone, 0, 0, state.last_gc_time_ms, 0);
  }
  switch (state.action) {
    case kDone:
      if (event.type == kTimer) {
        return state;
      } else if (event.type == kMarkCompact) {
        // Stay in DONE unless committed memory has grown past both the
        // relative (kCommittedMemoryFactor) and the absolute
        // (kCommittedMemoryDelta) threshold since the last run.
        if (event.committed_memory <
            Max(static_cast<size_t>(state.committed_memory_at_last_run *
                                    kCommittedMemoryFactor),
                state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
          return state;
        } else {
          return State(kWait, 0, event.time_ms + kLongDelayMs,
                       event.type == kMarkCompact ? event.time_ms
                                                  : state.last_gc_time_ms,
                       0);
        }
      } else {
        DCHECK_EQ(kPossibleGarbage, event.type);
        return State(
            kWait, 0, event.time_ms + kLongDelayMs,
            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms,
            0);
      }
    case kWait:
      switch (event.type) {
        case kPossibleGarbage:
          return state;
        case kTimer:
          if (state.started_gcs >= kMaxNumberOfGCs) {
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
                         event.committed_memory);
          } else if (event.can_start_incremental_gc &&
                     (event.should_start_incremental_gc ||
                      WatchdogGC(state, event))) {
            if (state.next_gc_start_ms <= event.time_ms) {
              return State(kRun, state.started_gcs + 1, 0.0,
                           state.last_gc_time_ms, 0);
            } else {
              return state;
            }
          } else {
            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                         state.last_gc_time_ms, 0);
          }
        case kMarkCompact:
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                       event.time_ms, 0);
      }
    case kRun:
      if (event.type != kMarkCompact) {
        return state;
      } else {
        // Continue the run with another GC if the budget allows and the last
        // GC is likely to collect more (always retry after the first GC);
        // otherwise finish the run.
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
                       event.time_ms, 0);
        } else {
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
                       event.committed_memory);
        }
      }
  }
  UNREACHABLE();
  return State(kDone, 0, 0, 0.0, 0);  // Make the compiler happy.
}


void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
  DCHECK(delay_ms > 0);
  // Leave some room for precision error in task scheduler.
  const double kSlackMs = 100;
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
  auto timer_task = new MemoryReducer::TimerTask(this);
  // The platform API expects the delay in seconds, hence the division.
  V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
      isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}

void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }

}  // namespace internal
}  // namespace v8