// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/runtime-profiler.h"

#include "src/base/platform/platform.h"
#include "src/codegen/assembler.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/pending-optimization-table.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/handles/global-handles.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
#include "src/tracing/trace-event.h"

namespace v8 {
namespace internal {

// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;

// The number of ticks required for optimizing a function increases with
// the size of the bytecode. This is in addition to the
// kProfilerTicksBeforeOptimization required for any function.
static const int kBytecodeSizeAllowancePerTick = 1200;
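// Worked example (derived from ShouldOptimize below): a function with 2400
// bytes of bytecode needs 2 + 2400 / 1200 = 4 profiler ticks before it is
// marked hot and stable.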

// Maximum size in bytes of a function's bytecode for it to be eligible for
// OSR. The allowance grows by kOSRBytecodeSizeAllowancePerTick per tick.
static const int kOSRBytecodeSizeAllowanceBase = 180;

static const int kOSRBytecodeSizeAllowancePerTick = 48;
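// Worked example (derived from MaybeOSR below): after 5 profiler ticks the
// OSR allowance is 180 + 5 * 48 = 420 bytes of bytecode.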

// Maximum size in bytes of a function's bytecode for it to be optimized
// the very first time it is seen on the stack.
static const int kMaxBytecodeSizeForEarlyOpt = 90;
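// Worked example (derived from ShouldOptimize below): a 60-byte function can
// be marked for optimization on its very first tick, without waiting for
// kProfilerTicksBeforeOptimization, provided no IC changed since the last
// tick.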

// Number of times a function has to be seen on the stack before it is
// OSRed in TurboProp.
// This value is chosen so that TurboProp OSRs at a similar time to TurboFan.
// The current interrupt budget of TurboFan is approximately 10 times that of
// TurboProp, and we wait for 3 ticks (2 to mark for optimization plus an
// additional tick to mark for OSR), hence this is set to 3 * 10.
static const int kProfilerTicksForTurboPropOSR = 3 * 10;

#define OPTIMIZATION_REASON_LIST(V)   \
  V(DoNotOptimize, "do not optimize") \
  V(HotAndStable, "hot and stable")   \
  V(SmallFunction, "small function")

enum class OptimizationReason : uint8_t {
#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
  OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
#undef OPTIMIZATION_REASON_CONSTANTS
};
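
// For reference, the X-macro above expands to:
//   enum class OptimizationReason : uint8_t {
//     kDoNotOptimize,
//     kHotAndStable,
//     kSmallFunction,
//   };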

char const* OptimizationReasonToString(OptimizationReason reason) {
  static char const* reasons[] = {
#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
      OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
#undef OPTIMIZATION_REASON_TEXTS
  };
  size_t const index = static_cast<size_t>(reason);
  DCHECK_LT(index, arraysize(reasons));
  return reasons[index];
}
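
// For example, OptimizationReasonToString(OptimizationReason::kHotAndStable)
// returns "hot and stable"; the enum and the string table are generated from
// the same OPTIMIZATION_REASON_LIST, so the two cannot get out of sync.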

#undef OPTIMIZATION_REASON_LIST

std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
  return os << OptimizationReasonToString(reason);
}

namespace {

void TraceInOptimizationQueue(JSFunction function) {
  if (FLAG_trace_opt_verbose) {
    PrintF("[function ");
    function.PrintName();
    PrintF(" is already in optimization queue]\n");
  }
}

void TraceHeuristicOptimizationDisallowed(JSFunction function) {
  if (FLAG_trace_opt_verbose) {
    PrintF("[function ");
    function.PrintName();
    PrintF(" has been marked manually for optimization]\n");
  }
}

// TODO(jgruber): Remove this once we include this tracing with --trace-opt.
void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
  if (FLAG_trace_turbo_nci) {
    StdoutStream os;
    os << "NCI tierup mark: " << Brief(function) << ", "
       << OptimizationReasonToString(reason) << std::endl;
  }
}

void TraceRecompile(JSFunction function, OptimizationReason reason,
                    CodeKind code_kind, Isolate* isolate) {
  if (code_kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
    TraceNCIRecompile(function, reason);
  }
  if (FLAG_trace_opt) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[marking ");
    function.ShortPrint(scope.file());
    PrintF(scope.file(), " for optimized recompilation, reason: %s",
           OptimizationReasonToString(reason));
    PrintF(scope.file(), "]\n");
  }
}

}  // namespace

RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate), any_ic_changed_(false) {}

void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
                               CodeKind code_kind) {
  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
  TraceRecompile(function, reason, code_kind, isolate_);
  function.MarkForOptimization(ConcurrencyMode::kConcurrent);
}

void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
                                                int loop_nesting_levels) {
  JSFunction function = frame->function();
  SharedFunctionInfo shared = function.shared();
  if (!FLAG_use_osr || !shared.IsUserJavaScript()) {
    return;
  }

  // If the code is not optimizable, don't try OSR.
  if (shared.optimization_disabled()) return;

  // We're using on-stack replacement: Store the new loop nesting level in the
  // BytecodeArray header so that certain back edges in any interpreter frame
  // for this bytecode will trigger on-stack replacement for that frame.
  if (FLAG_trace_osr) {
    CodeTracer::Scope scope(isolate_->GetCodeTracer());
    PrintF(scope.file(), "[OSR - arming back edges in ");
    function.PrintName(scope.file());
    PrintF(scope.file(), "]\n");
  }

  DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
  int level = frame->GetBytecodeArray().osr_loop_nesting_level();
  frame->GetBytecodeArray().set_osr_loop_nesting_level(
      Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}

void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
                                         JavaScriptFrame* frame,
                                         CodeKind code_kind) {
  DCHECK(CodeKindCanTierUp(code_kind));
  if (function.IsInOptimizationQueue()) {
    TraceInOptimizationQueue(function);
    return;
  }

  if (FLAG_testing_d8_test_runner &&
      !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
                                                                function)) {
    TraceHeuristicOptimizationDisallowed(function);
    return;
  }

  if (function.shared().optimization_disabled()) return;

  // Note: We currently do not trigger OSR compilation from NCI or TP code.
  // TODO(jgruber,v8:8888): But we should.
  if (frame->is_interpreted()) {
    DCHECK_EQ(code_kind, CodeKind::INTERPRETED_FUNCTION);
    if (FLAG_always_osr) {
      AttemptOnStackReplacement(InterpretedFrame::cast(frame),
                                AbstractCode::kMaxLoopNestingMarker);
      // Fall through and do a normal optimized compile as well.
    } else if (MaybeOSR(function, InterpretedFrame::cast(frame))) {
      return;
    }
  }

  OptimizationReason reason =
      ShouldOptimize(function, function.shared().GetBytecodeArray());

  if (reason != OptimizationReason::kDoNotOptimize) {
    Optimize(function, reason, code_kind);
  }
}

bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
  int ticks = function.feedback_vector().profiler_ticks();
  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
  // than kMaxToplevelSourceSize.

  // Turboprop optimizes quite early, so don't attempt OSR until the loop is
  // hot enough.
  if (FLAG_turboprop && ticks < kProfilerTicksForTurboPropOSR) {
    return false;
  }

  if (function.IsMarkedForOptimization() ||
      function.IsMarkedForConcurrentOptimization() ||
      function.HasAvailableOptimizedCode()) {
    // Attempt OSR if we are still running interpreted code even though the
    // function has long been marked or even already been optimized.
    // TODO(turboprop, mythria): Currently we don't tier up from Turboprop
    // code to Turbofan OSR code. When we start supporting this, the ticks
    // will have to be scaled accordingly.
    int64_t allowance =
        kOSRBytecodeSizeAllowanceBase +
        static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
    if (function.shared().GetBytecodeArray().length() <= allowance) {
      AttemptOnStackReplacement(frame);
    }
    return true;
  }
  return false;
}

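// Summary of the decision below: functions already running top-tier code are
// never re-marked; otherwise a function becomes kHotAndStable once its ticks
// reach kProfilerTicksBeforeOptimization plus one tick per
// kBytecodeSizeAllowancePerTick bytes of bytecode (scaled up for mid-tier
// Turboprop), while kSmallFunction lets tiny functions with stable ICs be
// optimized early.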
OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
                                                   BytecodeArray bytecode) {
  if (function.ActiveTierIsTurbofan()) {
    return OptimizationReason::kDoNotOptimize;
  }
  if (V8_UNLIKELY(FLAG_turboprop) && function.ActiveTierIsToptierTurboprop()) {
    return OptimizationReason::kDoNotOptimize;
  }
  int ticks = function.feedback_vector().profiler_ticks();
  int scale_factor = function.ActiveTierIsMidtierTurboprop()
                         ? FLAG_ticks_scale_factor_for_top_tier
                         : 1;
  int ticks_for_optimization =
      kProfilerTicksBeforeOptimization +
      (bytecode.length() / kBytecodeSizeAllowancePerTick);
  ticks_for_optimization *= scale_factor;
  if (ticks >= ticks_for_optimization) {
    return OptimizationReason::kHotAndStable;
  } else if (!any_ic_changed_ &&
             bytecode.length() < kMaxBytecodeSizeForEarlyOpt) {
    // TODO(turboprop, mythria): Do we need to support small function
    // optimization for the TP->TF tier-up? If so, do we want to scale the
    // bytecode size?
    // If no IC was patched since the last tick and this function is very
    // small, optimistically optimize it now.
    return OptimizationReason::kSmallFunction;
  } else if (FLAG_trace_opt_verbose) {
    PrintF("[not yet optimizing ");
    function.PrintName();
    PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
    if (any_ic_changed_) {
      PrintF("ICs changed]\n");
    } else {
      PrintF("too large for small function optimization: %d/%d]\n",
             bytecode.length(), kMaxBytecodeSizeForEarlyOpt);
    }
  }
  return OptimizationReason::kDoNotOptimize;
}

RuntimeProfiler::MarkCandidatesForOptimizationScope::
    MarkCandidatesForOptimizationScope(RuntimeProfiler* profiler)
    : handle_scope_(profiler->isolate_), profiler_(profiler) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.MarkCandidatesForOptimization");
}

RuntimeProfiler::MarkCandidatesForOptimizationScope::
    ~MarkCandidatesForOptimizationScope() {
  profiler_->any_ic_changed_ = false;
}

void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
  if (!isolate_->use_optimizer()) return;
  MarkCandidatesForOptimizationScope scope(this);
  int i = 0;
  for (JavaScriptFrameIterator it(isolate_); i < FLAG_frame_count && !it.done();
       i++, it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    if (!frame->is_interpreted()) continue;

    JSFunction function = frame->function();
    DCHECK(function.shared().is_compiled());
    if (!function.shared().IsInterpreted()) continue;

    if (!function.has_feedback_vector()) continue;

    MaybeOptimizeFrame(function, frame, CodeKind::INTERPRETED_FUNCTION);

    // TODO(leszeks): Move this increment to before the maybe optimize checks,
    // and update the tests to assume the increment has already happened.
    function.feedback_vector().SaturatingIncrementProfilerTicks();
  }
}

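// Unlike the bytecode variant above, this walks frames that are already
// running optimized code (e.g. NCI or mid-tier Turboprop) whose code kind
// can still tier up further.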
void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
  if (!isolate_->use_optimizer()) return;
  MarkCandidatesForOptimizationScope scope(this);
  int i = 0;
  for (JavaScriptFrameIterator it(isolate_); i < FLAG_frame_count && !it.done();
       i++, it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    if (!frame->is_optimized()) continue;

    JSFunction function = frame->function();
    auto code_kind = function.code().kind();
    if (!CodeKindIsOptimizedAndCanTierUp(code_kind)) {
      continue;
    }

    DCHECK(function.shared().is_compiled());
    DCHECK(function.has_feedback_vector());

    function.feedback_vector().SaturatingIncrementProfilerTicks();

    MaybeOptimizeFrame(function, frame, code_kind);
  }
}

}  // namespace internal
}  // namespace v8