// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/runtime-profiler.h"

#include "src/assembler.h"
#include "src/ast/scopeinfo.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"

namespace v8 {
namespace internal {


// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If the function optimization was disabled due to high deoptimization count,
// but the function is hot and has been seen on the stack this number of times,
// then we try to reenable optimization for this function.
static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);

// Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRCodeSizeAllowanceBase =
    100 * FullCodeGenerator::kCodeSizeMultiplier;

static const int kOSRCodeSizeAllowancePerTick =
    4 * FullCodeGenerator::kCodeSizeMultiplier;

// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
    5 * FullCodeGenerator::kCodeSizeMultiplier;
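
// Illustrative arithmetic only (not part of the build): assuming a
// kCodeSizeMultiplier of 1 for readability (the real value is
// platform-specific), the OSR size allowance grows linearly with ticks:
//
//   allowance(ticks) = kOSRCodeSizeAllowanceBase +
//                      ticks * kOSRCodeSizeAllowancePerTick
//   allowance(0)  = 100 bytes
//   allowance(25) = 100 + 25 * 4 = 200 bytes
//
// so a long-running function earns a progressively larger OSR budget, while
// kMaxSizeEarlyOpt (5 bytes under the same assumption) caps the "optimize on
// first sighting" path to very small functions.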


RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      any_ic_changed_(false) {
}


static void GetICCounts(SharedFunctionInfo* shared,
                        int* ic_with_type_info_count, int* ic_generic_count,
                        int* ic_total_count, int* type_info_percentage,
                        int* generic_percentage) {
  Code* shared_code = shared->code();
  *ic_total_count = 0;
  *ic_generic_count = 0;
  *ic_with_type_info_count = 0;
  Object* raw_info = shared_code->type_feedback_info();
  if (raw_info->IsTypeFeedbackInfo()) {
    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
    *ic_with_type_info_count = info->ic_with_type_info_count();
    *ic_generic_count = info->ic_generic_count();
    *ic_total_count = info->ic_total_count();
  }

  // Harvest vector ICs as well.
  TypeFeedbackVector* vector = shared->feedback_vector();
  int with = 0, gen = 0;
  vector->ComputeCounts(&with, &gen);
  *ic_with_type_info_count += with;
  *ic_generic_count += gen;

  if (*ic_total_count > 0) {
    *type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
    *generic_percentage = 100 * *ic_generic_count / *ic_total_count;
  } else {
    *type_info_percentage = 100;  // Compared against lower bound.
    *generic_percentage = 0;      // Compared against upper bound.
  }
}
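
// Worked example (illustrative, not part of the build): if the counts
// harvested above were ic_total_count == 10, ic_with_type_info_count == 8
// and ic_generic_count == 1, GetICCounts() would report 80% type info and
// 10% generic ICs. Those percentages are what the FLAG_type_info_threshold
// and FLAG_generic_ic_threshold comparisons in OptimizeNow() below act on.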


void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
  if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
    PrintF("[marking ");
    function->ShortPrint();
    PrintF(" for recompilation, reason: %s", reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(function->shared(), &typeinfo, &generic, &total,
                  &type_percentage, &generic_percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
             type_percentage);
      PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage);
    }
    PrintF("]\n");
  }

  function->AttemptConcurrentOptimization();
}
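
// Piecing together the PrintF format strings above, a --trace-opt line for a
// marked function looks roughly like this (illustrative; the exact function
// representation printed by ShortPrint() varies):
//
//   [marking 0x... <JSFunction foo> for recompilation, reason: hot and
//    stable, ICs with typeinfo: 3/4 (75%), generic ICs: 0/4 (0%)]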


void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
                                                int loop_nesting_levels) {
  SharedFunctionInfo* shared = function->shared();
  if (!FLAG_use_osr || shared->IsBuiltin()) {
    return;
  }

  // If the code is not optimizable, don't try OSR.
  if (shared->optimization_disabled()) return;

  // We are not prepared to do OSR for a function that already has an
  // allocated arguments object. The optimized code would bypass it for
  // arguments accesses, which is unsound. Don't try OSR.
  if (shared->uses_arguments()) return;

  // We're using on-stack replacement: patch the unoptimized code so that
  // any back edge in any unoptimized frame will trigger on-stack
  // replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[OSR - patching back edges in ");
    function->PrintName();
    PrintF("]\n");
  }

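  // Each BackEdgeTable::Patch call is expected to arm back edges at one more
  // level of loop nesting, so a larger loop_nesting_levels makes
  // progressively deeper loops eligible to trigger OSR.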
  for (int i = 0; i < loop_nesting_levels; i++) {
    BackEdgeTable::Patch(isolate_, shared->code());
  }
}


void RuntimeProfiler::OptimizeNow() {
  HandleScope scope(isolate_);

  if (!isolate_->use_crankshaft()) return;

  DisallowHeapAllocation no_gc;

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimization
  // (eagerly or lazily).
  int frame_count = 0;
  int frame_count_limit = FLAG_frame_count;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = frame->function();

    SharedFunctionInfo* shared = function->shared();
    Code* shared_code = shared->code();

    List<JSFunction*> functions(4);
    frame->GetFunctions(&functions);
    for (int i = functions.length(); --i >= 0; ) {
      SharedFunctionInfo* shared_function_info = functions[i]->shared();
      int ticks = shared_function_info->profiler_ticks();
      if (ticks < Smi::kMaxValue) {
        shared_function_info->set_profiler_ticks(ticks + 1);
      }
    }

    if (shared_code->kind() != Code::FUNCTION) continue;
    if (function->IsInOptimizationQueue()) continue;

    if (FLAG_always_osr) {
      AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
      // Fall through and do a normal optimized compile as well.
    } else if (!frame->is_optimized() &&
               (function->IsMarkedForOptimization() ||
                function->IsMarkedForConcurrentOptimization() ||
                function->IsOptimized())) {
      // Attempt OSR if we are still running unoptimized code even though
      // the function has long been marked or even already been optimized.
      int ticks = shared_code->profiler_ticks();
      int64_t allowance =
          kOSRCodeSizeAllowanceBase +
          static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
      if (shared_code->CodeSize() > allowance &&
          ticks < Code::ProfilerTicksField::kMax) {
        shared_code->set_profiler_ticks(ticks + 1);
      } else {
        AttemptOnStackReplacement(function);
      }
      continue;
    }

    // Only record top-level code on top of the execution stack and
    // avoid optimizing excessively large scripts since top-level code
    // will be executed only once.
    const int kMaxToplevelSourceSize = 10 * 1024;
    if (shared->is_toplevel() &&
        (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
      continue;
    }

    // Do not record non-optimizable functions.
    if (shared->optimization_disabled()) {
      if (shared->deopt_count() >= FLAG_max_opt_count) {
        // If optimization was disabled due to many deoptimizations,
        // then check if the function is hot and try to reenable optimization.
        int ticks = shared_code->profiler_ticks();
        if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
          shared_code->set_profiler_ticks(0);
          shared->TryReenableOptimization();
        } else {
          shared_code->set_profiler_ticks(ticks + 1);
        }
      }
      continue;
    }
    if (function->IsOptimized()) continue;

    int ticks = shared_code->profiler_ticks();

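    // The ladder below, summarized (constants from the top of this file,
    // percentages from GetICCounts()):
    //   ticks >= 2 with stable type feedback    -> optimize ("hot and stable")
    //   ticks >= 100, type feedback still thin  -> optimize anyway
    //   very small code, no IC changed since the last tick, stable
    //   feedback                                -> optimize ("small function")
    // Everything else just accumulates another profiler tick.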
    if (ticks >= kProfilerTicksBeforeOptimization) {
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
                  &generic_percentage);
      if (type_percentage >= FLAG_type_info_threshold &&
          generic_percentage <= FLAG_generic_ic_threshold) {
        // If this particular function hasn't had any ICs patched for enough
        // ticks, optimize it now.
        Optimize(function, "hot and stable");
      } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
        Optimize(function, "not much type info but very hot");
      } else {
        shared_code->set_profiler_ticks(ticks + 1);
        if (FLAG_trace_opt_verbose) {
          PrintF("[not yet optimizing ");
          function->PrintName();
          PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
                 type_percentage);
        }
      }
    } else if (!any_ic_changed_ &&
               shared_code->instruction_size() < kMaxSizeEarlyOpt) {
      // If no IC was patched since the last tick and this function is very
      // small, optimistically optimize it now.
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
                  &generic_percentage);
      if (type_percentage >= FLAG_type_info_threshold &&
          generic_percentage <= FLAG_generic_ic_threshold) {
        Optimize(function, "small function");
      } else {
        shared_code->set_profiler_ticks(ticks + 1);
      }
    } else {
      shared_code->set_profiler_ticks(ticks + 1);
    }
  }
  any_ic_changed_ = false;
}


}  // namespace internal
}  // namespace v8