// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/cpu-profiler.h"

#include <unordered_map>
#include <utility>

#include "src/base/lazy-instance.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/libsampler/sampler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-stats.h"
#include "src/profiler/symbolizer.h"
#include "src/utils/locked-queue-inl.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {

static const int kProfilerStackSize = 64 * KB;

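// Platform sampler that interrupts the profiled thread and records its stack
// into the SamplingEventsProcessor's tick buffer.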
class CpuSampler : public sampler::Sampler {
 public:
  CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
      : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
        processor_(processor),
        threadId_(ThreadId::Current()) {}

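  // Invoked by the sampler when a tick fires (on POSIX platforms typically
  // from a signal handler), so the body avoids allocation: the sample is
  // either written in place into the ring buffer or dropped, with the drop
  // reason recorded in ProfilerStats.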
  void SampleStack(const v8::RegisterState& regs) override {
    Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
    if (v8::Locker::IsActive() &&
        !isolate->thread_manager()->IsLockedByThread(threadId_)) {
      ProfilerStats::Instance()->AddReason(
          ProfilerStats::Reason::kIsolateNotLocked);
      return;
    }
    TickSample* sample = processor_->StartTickSample();
    if (sample == nullptr) {
      ProfilerStats::Instance()->AddReason(
          ProfilerStats::Reason::kTickBufferFull);
      return;
    }
    // Every bailout up until here resulted in a dropped sample. From now on,
    // the sample is created in the buffer.
    sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame,
                 /* update_stats */ true,
                 /* use_simulator_reg_state */ true, processor_->period());
    if (is_counting_samples_ && !sample->timestamp.IsNull()) {
      if (sample->state == JS) ++js_sample_count_;
      if (sample->state == EXTERNAL) ++external_sample_count_;
    }
    processor_->FinishTickSample();
  }

 private:
  SamplingEventsProcessor* processor_;
  ThreadId threadId_;
};

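// RAII scope that puts the isolate into profiling mode: it bumps the
// profiler count, subscribes the listener to code events, and logs the code
// objects already on the heap so that code compiled before profiling started
// can still be symbolized.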
ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
    : isolate_(isolate), listener_(listener) {
  size_t profiler_count = isolate_->num_cpu_profilers();
  profiler_count++;
  isolate_->set_num_cpu_profilers(profiler_count);
  isolate_->set_is_profiling(true);
  isolate_->wasm_engine()->EnableCodeLogging(isolate_);

  Logger* logger = isolate_->logger();
  logger->AddCodeEventListener(listener_);
  // Populate the ProfilerCodeObserver with the initial functions and
  // callbacks on the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());

  if (!FLAG_prof_browser_mode) {
    logger->LogCodeObjects();
  }
  logger->LogCompiledFunctions();
  logger->LogAccessorCallbacks();
}

ProfilingScope::~ProfilingScope() {
  isolate_->logger()->RemoveCodeEventListener(listener_);

  size_t profiler_count = isolate_->num_cpu_profilers();
  DCHECK_GT(profiler_count, 0);
  profiler_count--;
  isolate_->set_num_cpu_profilers(profiler_count);
  if (profiler_count == 0) isolate_->set_is_profiling(false);
}

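// Base class for the background thread that consumes code events and tick
// samples. Each enqueued code event receives a monotonically increasing
// order id; samples carry the id that was current when they were taken, so
// the two streams can be merged in a consistent order.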
ProfilerEventsProcessor::ProfilerEventsProcessor(
    Isolate* isolate, Symbolizer* symbolizer,
    ProfilerCodeObserver* code_observer)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      symbolizer_(symbolizer),
      code_observer_(code_observer),
      last_code_event_id_(0),
      last_processed_code_event_id_(0),
      isolate_(isolate) {
  DCHECK(!code_observer_->processor());
  code_observer_->set_processor(this);
}

SamplingEventsProcessor::SamplingEventsProcessor(
    Isolate* isolate, Symbolizer* symbolizer,
    ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
    base::TimeDelta period, bool use_precise_sampling)
    : ProfilerEventsProcessor(isolate, symbolizer, code_observer),
      sampler_(new CpuSampler(isolate, this)),
      profiles_(profiles),
      period_(period),
      use_precise_sampling_(use_precise_sampling) {
  sampler_->Start();
}

SamplingEventsProcessor::~SamplingEventsProcessor() { sampler_->Stop(); }

ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  DCHECK_EQ(code_observer_->processor(), this);
  code_observer_->clear_processor();
}

void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = ++last_code_event_id_;
  events_buffer_.Enqueue(event);
}

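// Records a synthetic sample at a deoptimization site. The register state is
// reconstructed from the current C entry frame and the deopt's fp-to-sp
// delta rather than taken from a hardware sample.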
void ProfilerEventsProcessor::AddDeoptStack(Address from, int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
  regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
  regs.fp = reinterpret_cast<void*>(fp);
  regs.pc = reinterpret_cast<void*>(from);
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, false,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}

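// Synchronously samples the current VM stack of the calling thread and
// queues it on the same buffer used for deopt and API-triggered samples.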
void ProfilerEventsProcessor::AddCurrentStack(bool update_stats) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  StackFrameIterator it(isolate_);
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = reinterpret_cast<void*>(frame->sp());
    regs.fp = reinterpret_cast<void*>(frame->fp());
    regs.pc = reinterpret_cast<void*>(frame->pc());
  }
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}

void ProfilerEventsProcessor::AddSample(TickSample sample) {
  TickSampleEventRecord record(last_code_event_id_);
  record.sample = sample;
  ticks_from_vm_buffer_.Enqueue(record);
}

void ProfilerEventsProcessor::StopSynchronously() {
  bool expected = true;
  if (!running_.compare_exchange_strong(expected, false,
                                        std::memory_order_relaxed))
    return;
  {
    base::MutexGuard guard(&running_mutex_);
    running_cond_.NotifyOne();
  }
  Join();
}

bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    code_observer_->CodeEventHandlerInternal(record);
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}

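// Receives code events from the main thread. Deopt events additionally
// capture the stack at the deopt site so the deoptimized frame is attributed
// correctly.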
void ProfilerEventsProcessor::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::CODE_CREATION:
    case CodeEventRecord::CODE_MOVE:
    case CodeEventRecord::CODE_DISABLE_OPT:
      Enqueue(evt_rec);
      break;
    case CodeEventRecord::CODE_DEOPT: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Address pc = rec->pc;
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      Enqueue(evt_rec);
      AddDeoptStack(pc, fp_to_sp_delta);
      break;
    }
    case CodeEventRecord::NONE:
    case CodeEventRecord::REPORT_BUILTIN:
      UNREACHABLE();
  }
}

void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
    const TickSampleEventRecord* record) {
  Symbolizer::SymbolizedSample symbolized =
      symbolizer_->SymbolizeTickSample(record->sample);
  profiles_->AddPathToCurrentProfiles(
      record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
      record->sample.update_stats, record->sample.sampling_interval);
}

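// Processes at most one pending sample. A sample is symbolized only once all
// code events created before it (those with order ids up to the sample's
// own) have been applied to the code map; otherwise the caller is asked to
// process the next code event first.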
ProfilerEventsProcessor::SampleProcessingResult
SamplingEventsProcessor::ProcessOneSample() {
  TickSampleEventRecord record1;
  if (ticks_from_vm_buffer_.Peek(&record1) &&
      (record1.order == last_processed_code_event_id_)) {
    TickSampleEventRecord record;
    ticks_from_vm_buffer_.Dequeue(&record);
    SymbolizeAndAddToProfiles(&record);
    return OneSampleProcessed;
  }

  const TickSampleEventRecord* record = ticks_buffer_.Peek();
  if (record == nullptr) {
    if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
    return FoundSampleForNextCodeEvent;
  }
  if (record->order != last_processed_code_event_id_) {
    return FoundSampleForNextCodeEvent;
  }
  SymbolizeAndAddToProfiles(record);
  ticks_buffer_.Remove();
  return OneSampleProcessed;
}

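// Main loop of the processor thread: drain pending samples and code events
// until the next sampling deadline, then ask the sampler for another tick.
// On Windows, short waits may spin instead of sleeping because the sleep
// timer's resolution is too coarse for small sampling intervals.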
void SamplingEventsProcessor::Run() {
  base::MutexGuard guard(&running_mutex_);
  while (running_.load(std::memory_order_relaxed)) {
    base::TimeTicks nextSampleTime =
        base::TimeTicks::HighResolutionNow() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to do the next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ are
        // processed, proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::HighResolutionNow();
    } while (result != NoSamplesInQueue && now < nextSampleTime);

    if (nextSampleTime > now) {
#if V8_OS_WIN
      if (use_precise_sampling_ &&
          nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
        // Do not use Sleep on Windows as it is very imprecise, with up to 16ms
        // jitter, which is unacceptable for short profile intervals.
        while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
        }
      } else  // NOLINT
#else
      USE(use_precise_sampling_);
#endif  // V8_OS_WIN
      {
        // Allow another thread to interrupt the delay between samples in the
        // event of profiler shutdown.
        while (now < nextSampleTime &&
               running_cond_.WaitFor(&running_mutex_, nextSampleTime - now)) {
          // If true was returned, we got interrupted before the timeout
          // elapsed. If this was not due to a change in running state, a
          // spurious wakeup occurred (thus we should continue to wait).
          if (!running_.load(std::memory_order_relaxed)) {
            break;
          }
          now = base::TimeTicks::HighResolutionNow();
        }
      }
    }

    // Schedule next sample.
    sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}

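// Changing the sampling interval restarts the processor thread so that the
// new period takes effect for the deadline computation in Run().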
void SamplingEventsProcessor::SetSamplingInterval(base::TimeDelta period) {
  if (period_ == period) return;
  StopSynchronously();

  period_ = period;
  running_.store(true, std::memory_order_relaxed);

  StartSynchronously();
}

void* SamplingEventsProcessor::operator new(size_t size) {
  return AlignedAlloc(size, alignof(SamplingEventsProcessor));
}

void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }

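// Owns the code map that the Symbolizer resolves addresses against. While a
// processor is attached, code events are routed through it so they are
// applied in order relative to samples; otherwise they update the map
// directly.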
ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate)
    : isolate_(isolate), processor_(nullptr) {
  CreateEntriesForRuntimeCallStats();
  LogBuiltins();
}

void ProfilerCodeObserver::ClearCodeMap() { code_map_.Clear(); }

void ProfilerCodeObserver::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  if (processor_) {
    processor_->CodeEventHandler(evt_rec);
    return;
  }
  CodeEventHandlerInternal(evt_rec);
}

void ProfilerCodeObserver::CodeEventHandlerInternal(
    const CodeEventsContainer& evt_rec) {
  CodeEventsContainer record = evt_rec;
  switch (evt_rec.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)        \
  case CodeEventRecord::type:                 \
    record.clss##_.UpdateCodeMap(&code_map_); \
    break;

    CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
    default:
      break;
  }
}

void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
    RuntimeCallCounter* counter = rcs->GetCounter(i);
    DCHECK(counter->name());
    auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
                               "native V8Runtime");
    code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
  }
}

void ProfilerCodeObserver::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (int i = 0; i < Builtins::builtin_count; i++) {
    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Builtins::Name id = static_cast<Builtins::Name>(i);
    Code code = builtins->builtin(id);
    rec->instruction_start = code.InstructionStart();
    rec->instruction_size = code.InstructionSize();
    rec->builtin_id = id;
    CodeEventHandlerInternal(evt_rec);
  }
}

int CpuProfiler::GetProfilesCount() {
  // The count of profiles doesn't depend on a security token.
  return static_cast<int>(profiles_->profiles()->size());
}

CpuProfile* CpuProfiler::GetProfile(int index) {
  return profiles_->profiles()->at(index).get();
}

void CpuProfiler::DeleteAllProfiles() {
  if (is_profiling_) StopProcessor();
  ResetProfiles();
}

void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  profiles_->RemoveProfile(profile);
  if (profiles_->profiles()->empty() && !is_profiling_) {
    // If this was the last profile, clean up all accessory data as well.
    ResetProfiles();
  }
}

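// Registry of all live CpuProfiler instances, keyed by isolate, so that the
// static CpuProfiler::CollectSample(Isolate*) can fan a sample request out
// to every profiler attached to that isolate.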
namespace {

class CpuProfilersManager {
 public:
  void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    profilers_.emplace(isolate, profiler);
  }

  void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      if (it->second != profiler) continue;
      profilers_.erase(it);
      return;
    }
    UNREACHABLE();
  }

  void CallCollectSample(Isolate* isolate) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      it->second->CollectSample();
    }
  }

 private:
  std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;
  base::Mutex mutex_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)

}  // namespace

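// Typical lifecycle, as a hedged sketch for orientation only (embedders use
// the public v8::CpuProfiler wrapper declared in include/v8-profiler.h; the
// names below are the internal ones defined in this file):
//
//   CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging);
//   profiler.StartProfiling("tag", CpuProfilingOptions{});  // starts thread
//   /* ... run the workload to be profiled ... */
//   CpuProfile* profile = profiler.StopProfiling("tag");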
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
                         CpuProfilingLoggingMode logging_mode)
    : CpuProfiler(isolate, naming_mode, logging_mode,
                  new CpuProfilesCollection(isolate), nullptr, nullptr) {}

CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
                         CpuProfilingLoggingMode logging_mode,
                         CpuProfilesCollection* test_profiles,
                         Symbolizer* test_symbolizer,
                         ProfilerEventsProcessor* test_processor)
    : isolate_(isolate),
      naming_mode_(naming_mode),
      logging_mode_(logging_mode),
      base_sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(test_profiles),
      symbolizer_(test_symbolizer),
      processor_(test_processor),
      code_observer_(isolate),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
  GetProfilersManager()->AddProfiler(isolate, this);

  if (logging_mode == kEagerLogging) EnableLogging();
}

CpuProfiler::~CpuProfiler() {
  DCHECK(!is_profiling_);
  GetProfilersManager()->RemoveProfiler(isolate_, this);

  DisableLogging();
}

void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
  DCHECK(!is_profiling_);
  base_sampling_interval_ = value;
}

void CpuProfiler::set_use_precise_sampling(bool value) {
  DCHECK(!is_profiling_);
  use_precise_sampling_ = value;
}

void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
  symbolizer_.reset();
  if (!profiling_scope_) {
    profiler_listener_.reset();
    code_observer_.ClearCodeMap();
  }
}

void CpuProfiler::EnableLogging() {
  if (profiling_scope_) return;

  if (!profiler_listener_) {
    profiler_listener_.reset(
        new ProfilerListener(isolate_, &code_observer_, naming_mode_));
  }
  profiling_scope_.reset(
      new ProfilingScope(isolate_, profiler_listener_.get()));
}

void CpuProfiler::DisableLogging() {
  if (!profiling_scope_) return;

  DCHECK(profiler_listener_);
  profiling_scope_.reset();
}

base::TimeDelta CpuProfiler::ComputeSamplingInterval() const {
  return profiles_->GetCommonSamplingInterval();
}

void CpuProfiler::AdjustSamplingInterval() {
  if (!processor_) return;

  base::TimeDelta base_interval = ComputeSamplingInterval();
  processor_->SetSamplingInterval(base_interval);
}

// static
void CpuProfiler::CollectSample(Isolate* isolate) {
  GetProfilersManager()->CallCollectSample(isolate);
}

void CpuProfiler::CollectSample() {
  if (processor_) {
    processor_->AddCurrentStack();
  }
}

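// Starts (or joins) a profiling session under the given title. Concurrent
// profiles share a single processor thread; the sampling interval is
// recomputed so a common interval serves all active profiles.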
CpuProfilingStatus CpuProfiler::StartProfiling(const char* title,
                                               CpuProfilingOptions options) {
  StartProfilingStatus status = profiles_->StartProfiling(title, options);

  // TODO(nicodubus): Revisit logic for if we want to do anything different for
  // kAlreadyStarted
  if (status == CpuProfilingStatus::kStarted ||
      status == CpuProfilingStatus::kAlreadyStarted) {
    TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
    AdjustSamplingInterval();
    StartProcessorIfNotStarted();
  }

  return status;
}

CpuProfilingStatus CpuProfiler::StartProfiling(String title,
                                               CpuProfilingOptions options) {
  return StartProfiling(profiles_->GetName(title), options);
}

void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack();
    return;
  }

  if (!profiling_scope_) {
    DCHECK_EQ(logging_mode_, kLazyLogging);
    EnableLogging();
  }

  if (!symbolizer_) {
    symbolizer_ = std::make_unique<Symbolizer>(code_observer_.code_map());
  }

  base::TimeDelta sampling_interval = ComputeSamplingInterval();
  processor_.reset(new SamplingEventsProcessor(
      isolate_, symbolizer_.get(), &code_observer_, profiles_.get(),
      sampling_interval, use_precise_sampling_));
  is_profiling_ = true;

  // Enable stack sampling.
  processor_->AddCurrentStack();
  processor_->StartSynchronously();
}

CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  if (!is_profiling_) return nullptr;
  StopProcessorIfLastProfile(title);
  CpuProfile* result = profiles_->StopProfiling(title);
  AdjustSamplingInterval();
  return result;
}

CpuProfile* CpuProfiler::StopProfiling(String title) {
  return StopProfiling(profiles_->GetName(title));
}

void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (!profiles_->IsLastProfile(title)) return;
  StopProcessor();
}

void CpuProfiler::StopProcessor() {
  is_profiling_ = false;
  processor_->StopSynchronously();
  processor_.reset();

  DCHECK(profiling_scope_);
  if (logging_mode_ == kLazyLogging) {
    DisableLogging();
  }
}

}  // namespace internal
}  // namespace v8