// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/runtime-profiler.h"

#include "src/assembler.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"

namespace v8 {
namespace internal {

// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;

// The number of ticks required for optimizing a function increases with
// the size of the bytecode. This is in addition to the
// kProfilerTicksBeforeOptimization required for any function.
static const int kBytecodeSizeAllowancePerTick = 1200;
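// For example, under these defaults ShouldOptimize() below requires
//   ticks >= kProfilerTicksBeforeOptimization + bytecode_length / 1200,
// so a function with 2400 bytes of bytecode needs 2 + 2400 / 1200 = 4
// profiler ticks before it is marked hot and stable.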

// Maximum size in bytes of bytecode for a function to allow OSR.
static const int kOSRBytecodeSizeAllowanceBase = 180;

static const int kOSRBytecodeSizeAllowancePerTick = 48;
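// For example, after 5 profiler ticks MaybeOSR() below allows OSR for
// functions with up to 180 + 5 * 48 = 420 bytes of bytecode.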

// Maximum size in bytes of bytecode for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxBytecodeSizeForEarlyOpt = 90;

// Certain functions are simply too big to be worth optimizing.
static const int kMaxBytecodeSizeForOpt = 60 * KB;

#define OPTIMIZATION_REASON_LIST(V)                            \
  V(DoNotOptimize, "do not optimize")                          \
  V(HotAndStable, "hot and stable")                            \
  V(SmallFunction, "small function")

enum class OptimizationReason : uint8_t {
#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
  OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
#undef OPTIMIZATION_REASON_CONSTANTS
};
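// The X-macro list above expands the enum to:
//   enum class OptimizationReason : uint8_t {
//     kDoNotOptimize,
//     kHotAndStable,
//     kSmallFunction,
//   };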

char const* OptimizationReasonToString(OptimizationReason reason) {
  static char const* reasons[] = {
#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
      OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
#undef OPTIMIZATION_REASON_TEXTS
  };
  size_t const index = static_cast<size_t>(reason);
  DCHECK_LT(index, arraysize(reasons));
  return reasons[index];
}

std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
  return os << OptimizationReasonToString(reason);
}

RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      any_ic_changed_(false) {
}

static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
                        int* ic_generic_count, int* ic_total_count,
                        int* type_info_percentage, int* generic_percentage) {
  // Harvest vector-ics.
  FeedbackVector* vector = function->feedback_vector();
  vector->ComputeCounts(ic_with_type_info_count, ic_generic_count,
                        ic_total_count);

  if (*ic_total_count > 0) {
    *type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
    *generic_percentage = 100 * *ic_generic_count / *ic_total_count;
  } else {
    *type_info_percentage = 100;  // Compared against lower bound.
    *generic_percentage = 0;      // Compared against upper bound.
  }
}

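// With --trace-opt, TraceRecompile() below prints a line of the form
// (the function representation and IC counts shown here are illustrative):
//   [marking <function> for optimized recompilation, reason: small function,
//    ICs with typeinfo: 3/4 (75%), generic ICs: 0/4 (0%)]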
static void TraceRecompile(JSFunction* function, const char* reason,
                           const char* type) {
  if (FLAG_trace_opt) {
    PrintF("[marking ");
    function->ShortPrint();
    PrintF(" for %s recompilation, reason: %s", type, reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                  &generic_percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
             type_percentage);
      PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage);
    }
    PrintF("]\n");
  }
}

void RuntimeProfiler::Optimize(JSFunction* function,
                               OptimizationReason reason) {
  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
  TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
  function->MarkForOptimization(ConcurrencyMode::kConcurrent);
}

void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
                                                int loop_nesting_levels) {
  JSFunction* function = frame->function();
  SharedFunctionInfo* shared = function->shared();
  if (!FLAG_use_osr || !function->shared()->IsUserJavaScript()) {
    return;
  }

  // If the code is not optimizable, don't try OSR.
  if (shared->optimization_disabled()) return;

  // We're using on-stack replacement: Store new loop nesting level in
  // BytecodeArray header so that certain back edges in any interpreter frame
  // for this bytecode will trigger on-stack replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[OSR - arming back edges in ");
    function->PrintName();
    PrintF("]\n");
  }

  DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
  DCHECK(shared->HasBytecodeArray());
  int level = shared->GetBytecodeArray()->osr_loop_nesting_level();
  shared->GetBytecodeArray()->set_osr_loop_nesting_level(
      Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}

void RuntimeProfiler::MaybeOptimize(JSFunction* function,
                                    JavaScriptFrame* frame) {
  if (function->IsInOptimizationQueue()) {
    if (FLAG_trace_opt_verbose) {
      PrintF("[function ");
      function->PrintName();
      PrintF(" is already in optimization queue]\n");
    }
    return;
  }

  if (FLAG_always_osr) {
    AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
    // Fall through and do a normal optimized compile as well.
  } else if (MaybeOSR(function, frame)) {
    return;
  }

  if (function->shared()->optimization_disabled()) return;

  if (frame->is_optimized()) return;

  OptimizationReason reason = ShouldOptimize(function, frame);

  if (reason != OptimizationReason::kDoNotOptimize) {
    Optimize(function, reason);
  }
}

bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
  SharedFunctionInfo* shared = function->shared();
  int ticks = function->feedback_vector()->profiler_ticks();

  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
  // than kMaxToplevelSourceSize.

  if (!frame->is_optimized() &&
      (function->IsMarkedForOptimization() ||
       function->IsMarkedForConcurrentOptimization() ||
       function->HasOptimizedCode())) {
    // Attempt OSR if we are still running interpreted code even though the
    // function has long been marked or even already been optimized.
    int64_t allowance =
        kOSRBytecodeSizeAllowanceBase +
        static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
    if (shared->GetBytecodeArray()->length() <= allowance) {
      AttemptOnStackReplacement(frame);
    }
    return true;
  }
  return false;
}

OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
                                                   JavaScriptFrame* frame) {
  SharedFunctionInfo* shared = function->shared();
  int ticks = function->feedback_vector()->profiler_ticks();

  if (shared->GetBytecodeArray()->length() > kMaxBytecodeSizeForOpt) {
    return OptimizationReason::kDoNotOptimize;
  }

  int ticks_for_optimization =
      kProfilerTicksBeforeOptimization +
      (shared->GetBytecodeArray()->length() / kBytecodeSizeAllowancePerTick);
  if (ticks >= ticks_for_optimization) {
    return OptimizationReason::kHotAndStable;
  } else if (!any_ic_changed_ && shared->GetBytecodeArray()->length() <
                                     kMaxBytecodeSizeForEarlyOpt) {
    // If no IC was patched since the last tick and this function is very
    // small, optimistically optimize it now.
    return OptimizationReason::kSmallFunction;
  } else if (FLAG_trace_opt_verbose) {
    PrintF("[not yet optimizing ");
    function->PrintName();
    PrintF(", not enough ticks: %d/%d and ", ticks,
           kProfilerTicksBeforeOptimization);
    if (any_ic_changed_) {
      PrintF("ICs changed]\n");
    } else {
      PrintF(" too large for small function optimization: %d/%d]\n",
             shared->GetBytecodeArray()->length(), kMaxBytecodeSizeForEarlyOpt);
    }
  }
  return OptimizationReason::kDoNotOptimize;
}

void RuntimeProfiler::MarkCandidatesForOptimization() {
  HandleScope scope(isolate_);

  if (!isolate_->use_optimizer()) return;

  DisallowHeapAllocation no_gc;

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimization
  // (eagerly or lazily).
  int frame_count = 0;
  int frame_count_limit = FLAG_frame_count;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    if (frame->is_optimized()) continue;

    JSFunction* function = frame->function();
    DCHECK(function->shared()->is_compiled());
    if (!function->shared()->IsInterpreted()) continue;

    MaybeOptimize(function, frame);

    // TODO(leszeks): Move this increment to before the maybe optimize checks,
    // and update the tests to assume the increment has already happened.
    int ticks = function->feedback_vector()->profiler_ticks();
    if (ticks < Smi::kMaxValue) {
      function->feedback_vector()->set_profiler_ticks(ticks + 1);
    }
  }
  any_ic_changed_ = false;
}

}  // namespace internal
}  // namespace v8