// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "runtime-profiler.h"

#include "assembler.h"
#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"

namespace v8 {
namespace internal {


// Optimization sampler constants.
static const int kSamplerFrameCount = 2;

// Constants for statistical profiler.
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };

static const int kSamplerTicksBetweenThresholdAdjustment = 32;

static const int kSamplerThresholdInit = 3;
static const int kSamplerThresholdMin = 1;
static const int kSamplerThresholdDelta = 1;

static const int kSamplerThresholdSizeFactorInit = 3;

static const int kSizeLimit = 1500;

// Constants for counter based profiler.

// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);

// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt = 500;


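// Shared profiler state: state_ is updated by isolates entering and leaving
// JavaScript and read by the profiler thread. A positive value counts the
// isolates currently running JavaScript, zero means none are, and -1 means
// the profiler thread has parked itself on the semaphore below (see
// WaitForSomeIsolateToEnterJS and HandleWakeUp).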
Atomic32 RuntimeProfiler::state_ = 0;

// TODO(isolates): Clean up the semaphore when it is no longer required.
static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;

#ifdef DEBUG
bool RuntimeProfiler::has_been_globally_set_up_ = false;
#endif
bool RuntimeProfiler::enabled_ = false;


RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampler_threshold_(kSamplerThresholdInit),
      sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
      sampler_ticks_until_threshold_adjustment_(
          kSamplerTicksBetweenThresholdAdjustment),
      sampler_window_position_(0) {
  ClearSampleBuffer();
}


void RuntimeProfiler::GlobalSetup() {
  ASSERT(!has_been_globally_set_up_);
  enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
  has_been_globally_set_up_ = true;
#endif
}


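// GetICCounts reports the fraction of this function's inline caches that
// carry type information. A function with no ICs at all reports 100%, so it
// is treated as fully typed by the heuristics below.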
static void GetICCounts(JSFunction* function,
                        int* ic_with_type_info_count,
                        int* ic_total_count,
                        int* percentage) {
  *ic_total_count = 0;
  *ic_with_type_info_count = 0;
  Object* raw_info =
      function->shared()->code()->type_feedback_info();
  if (raw_info->IsTypeFeedbackInfo()) {
    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
    *ic_with_type_info_count = info->ic_with_type_info_count();
    *ic_total_count = info->ic_total_count();
  }
  *percentage = *ic_total_count > 0
      ? 100 * *ic_with_type_info_count / *ic_total_count
      : 100;
}


void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
  ASSERT(function->IsOptimizable());
  if (FLAG_trace_opt) {
    PrintF("[marking ");
    function->PrintName();
    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
    PrintF(" for recompilation, reason: %s", reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, total, percentage;
      GetICCounts(function, &typeinfo, &total, &percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
    }
    PrintF("]\n");
  }

  // The next call to the function will trigger optimization.
  function->MarkForLazyRecompilation();
}


void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
  // Debug::has_break_points().
  ASSERT(function->IsMarkedForLazyRecompilation());
  if (!FLAG_use_osr ||
      isolate_->DebuggerHasBreakPoints() ||
      function->IsBuiltin()) {
    return;
  }

  SharedFunctionInfo* shared = function->shared();
  // If the code is not optimizable, don't try OSR.
  if (!shared->code()->optimizable()) return;

  // We are not prepared to do OSR for a function that already has an
  // allocated arguments object.  The optimized code would bypass it for
  // arguments accesses, which is unsound.  Don't try OSR.
  if (shared->uses_arguments()) return;

  // We're using on-stack replacement: patch the unoptimized code so that
  // any back edge in any unoptimized frame will trigger on-stack
  // replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[patching stack checks in ");
    function->PrintName();
    PrintF(" for on-stack replacement]\n");
  }

  // Get the stack check stub code object to match against.  We aren't
  // prepared to generate it, but we don't expect to have to.
  bool found_code = false;
  Code* stack_check_code = NULL;
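  // Depending on FLAG_count_based_interrupts, the unoptimized code's back
  // edges are expected to call either the InterruptStub (on the targets
  // listed below) or the StackCheckStub, so look up whichever code object
  // applies before patching.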
#if defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_ARM) || \
    defined(V8_TARGET_ARCH_MIPS)
  if (FLAG_count_based_interrupts) {
    InterruptStub interrupt_stub;
    found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
  } else  // NOLINT
#endif
  {  // NOLINT
    StackCheckStub check_stub;
    found_code = check_stub.FindCodeInCache(&stack_check_code);
  }
  if (found_code) {
    Code* replacement_code =
        isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
    Code* unoptimized_code = shared->code();
    Deoptimizer::PatchStackCheckCode(unoptimized_code,
                                     stack_check_code,
                                     replacement_code);
  }
}


void RuntimeProfiler::ClearSampleBuffer() {
  memset(sampler_window_, 0, sizeof(sampler_window_));
  memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
}


int RuntimeProfiler::LookupSample(JSFunction* function) {
  int weight = 0;
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* sample = sampler_window_[i];
    if (sample != NULL) {
      if (function == sample) {
        weight += sampler_window_weight_[i];
      }
    }
  }
  return weight;
}


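// The sampler window is a fixed-size ring buffer. Its size is asserted to be
// a power of two so the write position can wrap around with a bitmask
// instead of a modulo operation.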
void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
  ASSERT(IsPowerOf2(kSamplerWindowSize));
  sampler_window_[sampler_window_position_] = function;
  sampler_window_weight_[sampler_window_position_] = weight;
  sampler_window_position_ = (sampler_window_position_ + 1) &
      (kSamplerWindowSize - 1);
}


void RuntimeProfiler::OptimizeNow() {
  HandleScope scope(isolate_);

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimizations
  // (eagerly or lazily).
  JSFunction* samples[kSamplerFrameCount];
  int sample_count = 0;
  int frame_count = 0;
  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
                                                 : kSamplerFrameCount;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = JSFunction::cast(frame->function());

    if (!FLAG_watch_ic_patching) {
      // Adjust threshold each time we have processed
      // a certain number of ticks.
      if (sampler_ticks_until_threshold_adjustment_ > 0) {
        sampler_ticks_until_threshold_adjustment_--;
        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
          // If the threshold is not already at the minimum
          // modify and reset the ticks until next adjustment.
          if (sampler_threshold_ > kSamplerThresholdMin) {
            sampler_threshold_ -= kSamplerThresholdDelta;
            sampler_ticks_until_threshold_adjustment_ =
                kSamplerTicksBetweenThresholdAdjustment;
          }
        }
      }
    }

    Code* shared_code = function->shared()->code();
    if (shared_code->kind() != Code::FUNCTION) continue;

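    // A function already marked for lazy recompilation gets its stack checks
    // patched for OSR on the first tick (nesting == 0); after that the
    // permitted loop nesting level is raised by one per tick, up to
    // Code::kMaxLoopNestingMarker, so progressively deeper loops become
    // eligible for on-stack replacement.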
    if (function->IsMarkedForLazyRecompilation()) {
      int nesting = shared_code->allow_osr_at_loop_nesting_level();
      if (nesting == 0) AttemptOnStackReplacement(function);
      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
    }

    // Do not record non-optimizable functions.
    if (!function->IsOptimizable()) continue;
    if (function->shared()->optimization_disabled()) continue;

    // Only record top-level code on top of the execution stack and
    // avoid optimizing excessively large scripts since top-level code
    // will be executed only once.
    const int kMaxToplevelSourceSize = 10 * 1024;
    if (function->shared()->is_toplevel()
        && (frame_count > 1
            || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
      continue;
    }

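    // With FLAG_watch_ic_patching the decision below is counter based:
    // profiler ticks accumulate on the unoptimized code and the function is
    // optimized once it is hot with stable-looking type feedback (or it is
    // tiny, or extremely hot). Without the flag, fall back to the sampler
    // window collected further down.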
    if (FLAG_watch_ic_patching) {
      int ticks = shared_code->profiler_ticks();

      if (ticks >= kProfilerTicksBeforeOptimization) {
        int typeinfo, total, percentage;
        GetICCounts(function, &typeinfo, &total, &percentage);
        if (percentage >= FLAG_type_info_threshold) {
          // If this particular function hasn't had any ICs patched for enough
          // ticks, optimize it now.
          Optimize(function, "hot and stable");
        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
          Optimize(function, "not much type info but very hot");
        } else {
          shared_code->set_profiler_ticks(ticks + 1);
          if (FLAG_trace_opt_verbose) {
            PrintF("[not yet optimizing ");
            function->PrintName();
            PrintF(", not enough type info: %d/%d (%d%%)]\n",
                   typeinfo, total, percentage);
          }
        }
      } else if (!any_ic_changed_ &&
          shared_code->instruction_size() < kMaxSizeEarlyOpt) {
        // If no IC was patched since the last tick and this function is very
        // small, optimistically optimize it now.
        Optimize(function, "small function");
      } else if (!code_generated_ &&
          !any_ic_changed_ &&
          total_code_generated_ > 0 &&
          total_code_generated_ < 2000) {
        // If no code was generated and no IC was patched since the last tick,
        // but a little code has already been generated since last Reset(),
        // then type info might already be stable and we can optimize now.
        Optimize(function, "stable on startup");
      } else {
        shared_code->set_profiler_ticks(ticks + 1);
      }
    } else {  // !FLAG_watch_ic_patching
      samples[sample_count++] = function;

      int function_size = function->shared()->SourceSize();
      int threshold_size_factor = (function_size > kSizeLimit)
          ? sampler_threshold_size_factor_
          : 1;

      int threshold = sampler_threshold_ * threshold_size_factor;

      if (LookupSample(function) >= threshold) {
        Optimize(function, "sampler window lookup");
      }
    }
  }
  if (FLAG_watch_ic_patching) {
    any_ic_changed_ = false;
    code_generated_ = false;
  } else {  // !FLAG_watch_ic_patching
    // Add the collected functions as samples. It's important not to do
    // this as part of collecting them because this will interfere with
    // the sample lookup in case of recursive functions.
    for (int i = 0; i < sample_count; i++) {
      AddSample(samples[i], kSamplerFrameWeight[i]);
    }
  }
}


void RuntimeProfiler::NotifyTick() {
#if defined(V8_TARGET_ARCH_IA32) || \
    defined(V8_TARGET_ARCH_ARM) || \
    defined(V8_TARGET_ARCH_MIPS)
  if (FLAG_count_based_interrupts) return;
#endif
  isolate_->stack_guard()->RequestRuntimeProfilerTick();
}


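// GlobalSetup() must have been called (once, globally) before any
// per-isolate SetUp(); the assert below checks this in debug builds.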
void RuntimeProfiler::SetUp() {
  ASSERT(has_been_globally_set_up_);
  if (!FLAG_watch_ic_patching) {
    ClearSampleBuffer();
  }
  // If the ticker hasn't already started, make sure to do so to get
  // the ticks for the runtime profiler.
  if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}


void RuntimeProfiler::Reset() {
  if (FLAG_watch_ic_patching) {
    total_code_generated_ = 0;
  } else {  // !FLAG_watch_ic_patching
    sampler_threshold_ = kSamplerThresholdInit;
    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
    sampler_ticks_until_threshold_adjustment_ =
        kSamplerTicksBetweenThresholdAdjustment;
  }
}


void RuntimeProfiler::TearDown() {
  // Nothing to do.
}


int RuntimeProfiler::SamplerWindowSize() {
  return kSamplerWindowSize;
}


// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL && isolate_->heap()->InNewSpace(function)) {
      MapWord map_word = HeapObject::cast(function)->map_word();
      if (map_word.IsForwardingAddress()) {
        sampler_window_[i] = map_word.ToForwardingAddress();
      } else {
        sampler_window_[i] = NULL;
      }
    }
  }
}


void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
  // The profiler thread must still be waiting.
  ASSERT(NoBarrier_Load(&state_) >= 0);
  // In IsolateEnteredJS we have already incremented the counter and
  // undid the decrement done by the profiler thread. Increment again
  // to get the right count of active isolates.
  NoBarrier_AtomicIncrement(&state_, 1);
  semaphore.Pointer()->Signal();
}


bool RuntimeProfiler::IsSomeIsolateInJS() {
  return NoBarrier_Load(&state_) > 0;
}


bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
  Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
  ASSERT(old_state >= -1);
  if (old_state != 0) return false;
  semaphore.Pointer()->Wait();
  return true;
}


void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
  // Do a fake increment. If the profiler is waiting on the semaphore,
  // the returned state is 0, which can be left as an initial state in
  // case profiling is restarted later. If the profiler is not
  // waiting, the increment will prevent it from waiting, but has to
  // be undone after the profiler is stopped.
  Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
  ASSERT(new_state >= 0);
  if (new_state == 0) {
    // The profiler thread is waiting. Wake it up. It must check for
    // stop conditions before attempting to wait again.
    semaphore.Pointer()->Signal();
  }
  thread->Join();
  // The profiler thread is now stopped. Undo the increment in case it
  // was not waiting.
  if (new_state != 0) {
    NoBarrier_AtomicIncrement(&state_, -1);
  }
}


void RuntimeProfiler::RemoveDeadSamples() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL &&
        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
      sampler_window_[i] = NULL;
    }
  }
}


void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    visitor->VisitPointer(&sampler_window_[i]);
  }
}


bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
  if (!RuntimeProfiler::IsSomeIsolateInJS()) {
    return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
  }
  return false;
}


} }  // namespace v8::internal