• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_PROFILER_CPU_PROFILER_H_
6 #define V8_PROFILER_CPU_PROFILER_H_
7 
8 #include <memory>
9 
10 #include "src/allocation.h"
11 #include "src/base/atomic-utils.h"
12 #include "src/base/atomicops.h"
13 #include "src/base/platform/time.h"
14 #include "src/isolate.h"
15 #include "src/libsampler/sampler.h"
16 #include "src/locked-queue.h"
17 #include "src/profiler/circular-queue.h"
18 #include "src/profiler/profiler-listener.h"
19 #include "src/profiler/tick-sample.h"
20 
21 namespace v8 {
22 namespace internal {
23 
24 // Forward declarations.
25 class CodeEntry;
26 class CodeMap;
27 class CpuProfile;
28 class CpuProfilesCollection;
29 class ProfileGenerator;
30 
// X-macro list of all code event kinds. Each V(TYPE, Class) entry contributes
// an enum value to CodeEventRecord::Type and a union member to
// CodeEventsContainer, keeping the two in sync automatically.
#define CODE_EVENTS_TYPE_LIST(V)                         \
  V(CODE_CREATION, CodeCreateEventRecord)                \
  V(CODE_MOVE, CodeMoveEventRecord)                      \
  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord)         \
  V(CODE_DEOPT, CodeDeoptEventRecord)                    \
  V(REPORT_BUILTIN, ReportBuiltinEventRecord)


// Base class for all code event records. Carries the discriminating type tag
// and a sequence number ordering this event relative to tick samples.
class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  // mutable: order may be stamped on a record reached via a const reference.
  mutable unsigned order;
};
52 
53 
54 class CodeCreateEventRecord : public CodeEventRecord {
55  public:
56   Address start;
57   CodeEntry* entry;
58   unsigned size;
59 
60   INLINE(void UpdateCodeMap(CodeMap* code_map));
61 };
62 
63 
64 class CodeMoveEventRecord : public CodeEventRecord {
65  public:
66   Address from;
67   Address to;
68 
69   INLINE(void UpdateCodeMap(CodeMap* code_map));
70 };
71 
72 
73 class CodeDisableOptEventRecord : public CodeEventRecord {
74  public:
75   Address start;
76   const char* bailout_reason;
77 
78   INLINE(void UpdateCodeMap(CodeMap* code_map));
79 };
80 
81 
82 class CodeDeoptEventRecord : public CodeEventRecord {
83  public:
84   Address start;
85   const char* deopt_reason;
86   int deopt_id;
87   void* pc;
88   int fp_to_sp_delta;
89 
90   INLINE(void UpdateCodeMap(CodeMap* code_map));
91 };
92 
93 
94 class ReportBuiltinEventRecord : public CodeEventRecord {
95  public:
96   Address start;
97   Builtins::Name builtin_id;
98 
99   INLINE(void UpdateCodeMap(CodeMap* code_map));
100 };
101 
102 
103 class TickSampleEventRecord {
104  public:
105   // The parameterless constructor is used when we dequeue data from
106   // the ticks buffer.
TickSampleEventRecord()107   TickSampleEventRecord() { }
TickSampleEventRecord(unsigned order)108   explicit TickSampleEventRecord(unsigned order) : order(order) { }
109 
110   unsigned order;
111   TickSample sample;
112 };
113 
114 
115 class CodeEventsContainer {
116  public:
117   explicit CodeEventsContainer(
118       CodeEventRecord::Type type = CodeEventRecord::NONE) {
119     generic.type = type;
120   }
121   union  {
122     CodeEventRecord generic;
123 #define DECLARE_CLASS(ignore, type) type type##_;
124     CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
125 #undef DECLARE_CLASS
126   };
127 };
128 
129 
130 // This class implements both the profile events processor thread and
131 // methods called by event producers: VM and stack sampler threads.
132 class ProfilerEventsProcessor : public base::Thread {
133  public:
134   ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
135                           base::TimeDelta period);
136   virtual ~ProfilerEventsProcessor();
137 
138   // Thread control.
139   virtual void Run();
140   void StopSynchronously();
INLINE(bool running ())141   INLINE(bool running()) { return !!base::NoBarrier_Load(&running_); }
142   void Enqueue(const CodeEventsContainer& event);
143 
144   // Puts current stack into tick sample events buffer.
145   void AddCurrentStack(Isolate* isolate, bool update_stats = false);
146   void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
147 
148   // Tick sample events are filled directly in the buffer of the circular
149   // queue (because the structure is of fixed width, but usually not all
150   // stack frame entries are filled.) This method returns a pointer to the
151   // next record of the buffer.
152   inline TickSample* StartTickSample();
153   inline void FinishTickSample();
154 
155   // SamplingCircularQueue has stricter alignment requirements than a normal new
156   // can fulfil, so we need to provide our own new/delete here.
157   void* operator new(size_t size);
158   void operator delete(void* ptr);
159 
sampler()160   sampler::Sampler* sampler() { return sampler_.get(); }
161 
162  private:
163   // Called from events processing thread (Run() method.)
164   bool ProcessCodeEvent();
165 
166   enum SampleProcessingResult {
167     OneSampleProcessed,
168     FoundSampleForNextCodeEvent,
169     NoSamplesInQueue
170   };
171   SampleProcessingResult ProcessOneSample();
172 
173   ProfileGenerator* generator_;
174   std::unique_ptr<sampler::Sampler> sampler_;
175   base::Atomic32 running_;
176   const base::TimeDelta period_;  // Samples & code events processing period.
177   LockedQueue<CodeEventsContainer> events_buffer_;
178   static const size_t kTickSampleBufferSize = 1 * MB;
179   static const size_t kTickSampleQueueLength =
180       kTickSampleBufferSize / sizeof(TickSampleEventRecord);
181   SamplingCircularQueue<TickSampleEventRecord,
182                         kTickSampleQueueLength> ticks_buffer_;
183   LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
184   base::AtomicNumber<unsigned> last_code_event_id_;
185   unsigned last_processed_code_event_id_;
186 };
187 
188 class CpuProfiler : public CodeEventObserver {
189  public:
190   explicit CpuProfiler(Isolate* isolate);
191 
192   CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
193               ProfileGenerator* test_generator,
194               ProfilerEventsProcessor* test_processor);
195 
196   ~CpuProfiler() override;
197 
198   void set_sampling_interval(base::TimeDelta value);
199   void CollectSample();
200   void StartProfiling(const char* title, bool record_samples = false);
201   void StartProfiling(String* title, bool record_samples);
202   CpuProfile* StopProfiling(const char* title);
203   CpuProfile* StopProfiling(String* title);
204   int GetProfilesCount();
205   CpuProfile* GetProfile(int index);
206   void DeleteAllProfiles();
207   void DeleteProfile(CpuProfile* profile);
208 
209   void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
210 
is_profiling()211   bool is_profiling() const { return is_profiling_; }
212 
generator()213   ProfileGenerator* generator() const { return generator_.get(); }
processor()214   ProfilerEventsProcessor* processor() const { return processor_.get(); }
isolate()215   Isolate* isolate() const { return isolate_; }
216 
217  private:
218   void StartProcessorIfNotStarted();
219   void StopProcessorIfLastProfile(const char* title);
220   void StopProcessor();
221   void ResetProfiles();
222   void LogBuiltins();
223   void CreateEntriesForRuntimeCallStats();
224 
225   Isolate* const isolate_;
226   base::TimeDelta sampling_interval_;
227   std::unique_ptr<CpuProfilesCollection> profiles_;
228   std::unique_ptr<ProfileGenerator> generator_;
229   std::unique_ptr<ProfilerEventsProcessor> processor_;
230   std::vector<std::unique_ptr<CodeEntry>> static_entries_;
231   bool saved_is_logging_;
232   bool is_profiling_;
233 
234   DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
235 };
236 
237 }  // namespace internal
238 }  // namespace v8
239 
240 
241 #endif  // V8_PROFILER_CPU_PROFILER_H_
242